Open In Colab

Traffic Sign Detection - SSD MobileNet FPNLite with image augmentation¶

In [ ]:
# Reference: https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/
In [ ]:
# Use the following package versions for compatibiltiy
!pip install "PyYAML==5.3" "numpy==1.24.3"
Collecting PyYAML==5.3
  Downloading PyYAML-5.3.tar.gz (268 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 268.2/268.2 kB 2.8 MB/s eta 0:00:00
  Preparing metadata (setup.py) ... done
Collecting numpy==1.24.3
  Downloading numpy-1.24.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (17.3 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 17.3/17.3 MB 49.8 MB/s eta 0:00:00
Building wheels for collected packages: PyYAML
  Building wheel for PyYAML (setup.py) ... done
  Created wheel for PyYAML: filename=PyYAML-5.3-cp310-cp310-linux_x86_64.whl size=44244 sha256=143c4fbb6997e254a5f0dc0d17023005b1d4d95b47aa34b2238d179495ff205d
  Stored in directory: /root/.cache/pip/wheels/0d/72/68/a263cfc14175636cf26bada99f13b735be1b60a11318e08bfc
Successfully built PyYAML
Installing collected packages: PyYAML, numpy
  Attempting uninstall: PyYAML
    Found existing installation: PyYAML 6.0.1
    Uninstalling PyYAML-6.0.1:
      Successfully uninstalled PyYAML-6.0.1
  Attempting uninstall: numpy
    Found existing installation: numpy 1.22.4
    Uninstalling numpy-1.22.4:
      Successfully uninstalled numpy-1.22.4
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
dask 2022.12.1 requires pyyaml>=5.3.1, but you have pyyaml 5.3 which is incompatible.
flax 0.7.0 requires PyYAML>=5.4.1, but you have pyyaml 5.3 which is incompatible.
numba 0.56.4 requires numpy<1.24,>=1.18, but you have numpy 1.24.3 which is incompatible.
tensorflow 2.12.0 requires numpy<1.24,>=1.22, but you have numpy 1.24.3 which is incompatible.
Successfully installed PyYAML-5.3 numpy-1.24.3
In [ ]:
import os
import tarfile
from zipfile import ZipFile
In [ ]:
# Create a root working directory for all downloaded models and data.
# os.makedirs with exist_ok=True is idempotent and avoids the
# check-then-create race of os.path.exists + os.mkdir, so re-running
# this cell (Restart & Run All) is always safe.
os.makedirs("Tensorflow", exist_ok=True)
In [ ]:
# Define model name.
# Checkpoint identifier for SSD MobileNet V2 FPNLite 640x640 (COCO17, TPU-8)
# from the TF2 Detection Model Zoo — presumably used by later cells to build
# download URLs and config paths; verify against the cells that consume it.

MODEL_NAME = "ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8"

TensorFlow Model Garden¶

In [ ]:
# Clone the TensorFlow Model Garden, which provides the Object Detection API.
# NOTE(review): the clone is not pinned to a tag/commit, so results may drift
# as upstream changes — consider checking out a fixed revision (and a shallow
# clone with --depth 1 would download far less than ~600 MB of history).

%cd "Tensorflow"
!git clone "https://github.com/tensorflow/models.git"
/content/Tensorflow
Cloning into 'models'...
remote: Enumerating objects: 86752, done.
remote: Counting objects: 100% (506/506), done.
remote: Compressing objects: 100% (246/246), done.
remote: Total 86752 (delta 264), reused 483 (delta 254), pack-reused 86246
Receiving objects: 100% (86752/86752), 598.91 MiB | 16.87 MiB/s, done.
Resolving deltas: 100% (62146/62146), done.
In [ ]:
# Install the TF2 object detection package:
#   1. compile the .proto message definitions into Python modules
#      (protoc must run before install — the package imports them),
#   2. copy the TF2-specific setup.py into models/research,
#   3. pip-install the local package from that directory.

%cd "models/research"

!protoc object_detection/protos/*.proto --python_out=.
%cp object_detection/packages/tf2/setup.py .
!pip install .
/content/Tensorflow/models/research
Processing /content/Tensorflow/models/research
  Preparing metadata (setup.py) ... done
Collecting avro-python3 (from object-detection==0.1)
  Downloading avro-python3-1.10.2.tar.gz (38 kB)
  Preparing metadata (setup.py) ... done
Collecting apache-beam (from object-detection==0.1)
  Downloading apache_beam-2.49.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (14.6 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 14.6/14.6 MB 32.3 MB/s eta 0:00:00
Requirement already satisfied: pillow in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (8.4.0)
Requirement already satisfied: lxml in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (4.9.3)
Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (3.7.1)
Requirement already satisfied: Cython in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (0.29.36)
Requirement already satisfied: contextlib2 in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (21.6.0)
Requirement already satisfied: tf-slim in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (1.1.0)
Requirement already satisfied: six in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (1.16.0)
Requirement already satisfied: pycocotools in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (2.0.6)
Collecting lvis (from object-detection==0.1)
  Downloading lvis-0.5.3-py3-none-any.whl (14 kB)
Requirement already satisfied: scipy in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (1.10.1)
Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (1.5.3)
Collecting tf-models-official>=2.5.1 (from object-detection==0.1)
  Downloading tf_models_official-2.13.1-py2.py3-none-any.whl (2.6 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.6/2.6 MB 36.2 MB/s eta 0:00:00
Collecting tensorflow_io (from object-detection==0.1)
  Downloading tensorflow_io-0.32.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (28.0 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 28.0/28.0 MB 37.7 MB/s eta 0:00:00
Requirement already satisfied: keras in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (2.12.0)
Collecting pyparsing==2.4.7 (from object-detection==0.1)
  Downloading pyparsing-2.4.7-py2.py3-none-any.whl (67 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 67.8/67.8 kB 6.3 MB/s eta 0:00:00
Collecting sacrebleu<=2.2.0 (from object-detection==0.1)
  Downloading sacrebleu-2.2.0-py3-none-any.whl (116 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 116.6/116.6 kB 8.5 MB/s eta 0:00:00
Collecting portalocker (from sacrebleu<=2.2.0->object-detection==0.1)
  Downloading portalocker-2.7.0-py2.py3-none-any.whl (15 kB)
Requirement already satisfied: regex in /usr/local/lib/python3.10/dist-packages (from sacrebleu<=2.2.0->object-detection==0.1) (2022.10.31)
Requirement already satisfied: tabulate>=0.8.9 in /usr/local/lib/python3.10/dist-packages (from sacrebleu<=2.2.0->object-detection==0.1) (0.9.0)
Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from sacrebleu<=2.2.0->object-detection==0.1) (1.24.3)
Collecting colorama (from sacrebleu<=2.2.0->object-detection==0.1)
  Downloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)
Requirement already satisfied: gin-config in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (0.5.0)
Requirement already satisfied: google-api-python-client>=1.6.7 in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (2.84.0)
Collecting immutabledict (from tf-models-official>=2.5.1->object-detection==0.1)
  Downloading immutabledict-3.0.0-py3-none-any.whl (4.0 kB)
Requirement already satisfied: kaggle>=1.3.9 in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (1.5.16)
Requirement already satisfied: oauth2client in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (4.1.3)
Requirement already satisfied: opencv-python-headless in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (4.8.0.74)
Requirement already satisfied: psutil>=5.4.3 in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (5.9.5)
Requirement already satisfied: py-cpuinfo>=3.3.0 in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (9.0.0)
Requirement already satisfied: pyyaml<5.4.0,>=5.1 in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (5.3)
Collecting sentencepiece (from tf-models-official>=2.5.1->object-detection==0.1)
  Downloading sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.3 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.3/1.3 MB 44.3 MB/s eta 0:00:00
Collecting seqeval (from tf-models-official>=2.5.1->object-detection==0.1)
  Downloading seqeval-1.2.2.tar.gz (43 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 43.6/43.6 kB 5.7 MB/s eta 0:00:00
  Preparing metadata (setup.py) ... done
Requirement already satisfied: tensorflow-datasets in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (4.9.2)
Requirement already satisfied: tensorflow-hub>=0.6.0 in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (0.14.0)
Collecting tensorflow-model-optimization>=0.4.1 (from tf-models-official>=2.5.1->object-detection==0.1)
  Downloading tensorflow_model_optimization-0.7.5-py2.py3-none-any.whl (241 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 241.2/241.2 kB 25.8 MB/s eta 0:00:00
Collecting tensorflow-text~=2.13.0 (from tf-models-official>=2.5.1->object-detection==0.1)
  Downloading tensorflow_text-2.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (6.5 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.5/6.5 MB 33.2 MB/s eta 0:00:00
Collecting tensorflow~=2.13.0 (from tf-models-official>=2.5.1->object-detection==0.1)
  Downloading tensorflow-2.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (524.1 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 524.1/524.1 MB 2.2 MB/s eta 0:00:00
Requirement already satisfied: python-dateutil>=2.8.1 in /usr/local/lib/python3.10/dist-packages (from pandas->object-detection==0.1) (2.8.2)
Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas->object-detection==0.1) (2022.7.1)
Requirement already satisfied: absl-py>=0.2.2 in /usr/local/lib/python3.10/dist-packages (from tf-slim->object-detection==0.1) (1.4.0)
Collecting crcmod<2.0,>=1.7 (from apache-beam->object-detection==0.1)
  Downloading crcmod-1.7.tar.gz (89 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 89.7/89.7 kB 12.9 MB/s eta 0:00:00
  Preparing metadata (setup.py) ... done
Collecting orjson<4.0 (from apache-beam->object-detection==0.1)
  Downloading orjson-3.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (138 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 138.7/138.7 kB 18.8 MB/s eta 0:00:00
Collecting dill<0.3.2,>=0.3.1.1 (from apache-beam->object-detection==0.1)
  Downloading dill-0.3.1.1.tar.gz (151 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 152.0/152.0 kB 21.3 MB/s eta 0:00:00
  Preparing metadata (setup.py) ... done
Requirement already satisfied: cloudpickle~=2.2.1 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (2.2.1)
Collecting fastavro<2,>=0.23.6 (from apache-beam->object-detection==0.1)
  Downloading fastavro-1.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.7 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.7/2.7 MB 102.3 MB/s eta 0:00:00
Collecting fasteners<1.0,>=0.3 (from apache-beam->object-detection==0.1)
  Downloading fasteners-0.18-py3-none-any.whl (18 kB)
Requirement already satisfied: grpcio!=1.48.0,<2,>=1.33.1 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (1.56.0)
Collecting hdfs<3.0.0,>=2.1.0 (from apache-beam->object-detection==0.1)
  Downloading hdfs-2.7.0-py3-none-any.whl (34 kB)
Requirement already satisfied: httplib2<0.23.0,>=0.8 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (0.21.0)
Collecting objsize<0.7.0,>=0.6.1 (from apache-beam->object-detection==0.1)
  Downloading objsize-0.6.1-py3-none-any.whl (9.3 kB)
Collecting pymongo<5.0.0,>=3.8.0 (from apache-beam->object-detection==0.1)
  Downloading pymongo-4.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (603 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 603.6/603.6 kB 56.7 MB/s eta 0:00:00
Requirement already satisfied: proto-plus<2,>=1.7.1 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (1.22.3)
Requirement already satisfied: protobuf<4.24.0,>=3.20.3 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (3.20.3)
Requirement already satisfied: pydot<2,>=1.2.0 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (1.4.2)
Requirement already satisfied: requests<3.0.0,>=2.24.0 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (2.27.1)
Requirement already satisfied: typing-extensions>=3.7.0 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (4.7.1)
Collecting zstandard<1,>=0.18.0 (from apache-beam->object-detection==0.1)
  Downloading zstandard-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.7 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.7/2.7 MB 99.9 MB/s eta 0:00:00
Requirement already satisfied: pyarrow<12.0.0,>=3.0.0 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (9.0.0)
Requirement already satisfied: cycler>=0.10.0 in /usr/local/lib/python3.10/dist-packages (from lvis->object-detection==0.1) (0.11.0)
Requirement already satisfied: kiwisolver>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from lvis->object-detection==0.1) (1.4.4)
Requirement already satisfied: opencv-python>=4.1.0.25 in /usr/local/lib/python3.10/dist-packages (from lvis->object-detection==0.1) (4.7.0.72)
Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->object-detection==0.1) (1.1.0)
Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->object-detection==0.1) (4.41.0)
Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->object-detection==0.1) (23.1)
Requirement already satisfied: tensorflow-io-gcs-filesystem==0.32.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow_io->object-detection==0.1) (0.32.0)
Requirement already satisfied: google-auth<3.0.0dev,>=1.19.0 in /usr/local/lib/python3.10/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (2.17.3)
Requirement already satisfied: google-auth-httplib2>=0.1.0 in /usr/local/lib/python3.10/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (0.1.0)
Requirement already satisfied: google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0dev,>=1.31.5 in /usr/local/lib/python3.10/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (2.11.1)
Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.10/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (4.1.1)
Collecting docopt (from hdfs<3.0.0,>=2.1.0->apache-beam->object-detection==0.1)
  Downloading docopt-0.6.2.tar.gz (25 kB)
  Preparing metadata (setup.py) ... done
Requirement already satisfied: certifi in /usr/local/lib/python3.10/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (2023.5.7)
Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (4.65.0)
Requirement already satisfied: python-slugify in /usr/local/lib/python3.10/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (8.0.1)
Requirement already satisfied: urllib3 in /usr/local/lib/python3.10/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (1.26.16)
Requirement already satisfied: bleach in /usr/local/lib/python3.10/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (6.0.0)
Collecting dnspython<3.0.0,>=1.16.0 (from pymongo<5.0.0,>=3.8.0->apache-beam->object-detection==0.1)
  Downloading dnspython-2.4.0-py3-none-any.whl (300 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 300.0/300.0 kB 35.1 MB/s eta 0:00:00
Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.10/dist-packages (from requests<3.0.0,>=2.24.0->apache-beam->object-detection==0.1) (2.0.12)
Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests<3.0.0,>=2.24.0->apache-beam->object-detection==0.1) (3.4)
Requirement already satisfied: astunparse>=1.6.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (1.6.3)
Requirement already satisfied: flatbuffers>=23.1.21 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (23.5.26)
Requirement already satisfied: gast<=0.4.0,>=0.2.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (0.4.0)
Requirement already satisfied: google-pasta>=0.1.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (0.2.0)
Requirement already satisfied: h5py>=2.9.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (3.8.0)
Collecting keras (from object-detection==0.1)
  Downloading keras-2.13.1-py3-none-any.whl (1.7 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.7/1.7 MB 74.9 MB/s eta 0:00:00
Requirement already satisfied: libclang>=13.0.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (16.0.6)
Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (3.3.0)
Requirement already satisfied: setuptools in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (67.7.2)
Collecting tensorboard<2.14,>=2.13 (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1)
  Downloading tensorboard-2.13.0-py3-none-any.whl (5.6 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 5.6/5.6 MB 62.6 MB/s eta 0:00:00
Collecting tensorflow-estimator<2.14,>=2.13.0 (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1)
  Downloading tensorflow_estimator-2.13.0-py2.py3-none-any.whl (440 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 440.8/440.8 kB 39.5 MB/s eta 0:00:00
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (2.3.0)
Collecting typing-extensions>=3.7.0 (from apache-beam->object-detection==0.1)
  Downloading typing_extensions-4.5.0-py3-none-any.whl (27 kB)
Requirement already satisfied: wrapt>=1.11.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (1.14.1)
Requirement already satisfied: dm-tree~=0.1.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow-model-optimization>=0.4.1->tf-models-official>=2.5.1->object-detection==0.1) (0.1.8)
Requirement already satisfied: pyasn1>=0.1.7 in /usr/local/lib/python3.10/dist-packages (from oauth2client->tf-models-official>=2.5.1->object-detection==0.1) (0.5.0)
Requirement already satisfied: pyasn1-modules>=0.0.5 in /usr/local/lib/python3.10/dist-packages (from oauth2client->tf-models-official>=2.5.1->object-detection==0.1) (0.3.0)
Requirement already satisfied: rsa>=3.1.4 in /usr/local/lib/python3.10/dist-packages (from oauth2client->tf-models-official>=2.5.1->object-detection==0.1) (4.9)
Requirement already satisfied: scikit-learn>=0.21.3 in /usr/local/lib/python3.10/dist-packages (from seqeval->tf-models-official>=2.5.1->object-detection==0.1) (1.2.2)
Requirement already satisfied: array-record in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (0.4.0)
Requirement already satisfied: click in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (8.1.6)
Requirement already satisfied: etils[enp,epath]>=0.9.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (1.3.0)
Requirement already satisfied: promise in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (2.3)
Requirement already satisfied: tensorflow-metadata in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (1.13.1)
Requirement already satisfied: toml in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (0.10.2)
Requirement already satisfied: wheel<1.0,>=0.23.0 in /usr/local/lib/python3.10/dist-packages (from astunparse>=1.6.0->tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (0.40.0)
Collecting httpcore>=0.17.3 (from dnspython<3.0.0,>=1.16.0->pymongo<5.0.0,>=3.8.0->apache-beam->object-detection==0.1)
  Downloading httpcore-0.17.3-py3-none-any.whl (74 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 74.5/74.5 kB 5.3 MB/s eta 0:00:00
Requirement already satisfied: sniffio<2.0,>=1.1 in /usr/local/lib/python3.10/dist-packages (from dnspython<3.0.0,>=1.16.0->pymongo<5.0.0,>=3.8.0->apache-beam->object-detection==0.1) (1.3.0)
Requirement already satisfied: importlib_resources in /usr/local/lib/python3.10/dist-packages (from etils[enp,epath]>=0.9.0->tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (6.0.0)
Requirement already satisfied: zipp in /usr/local/lib/python3.10/dist-packages (from etils[enp,epath]>=0.9.0->tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (3.16.2)
Requirement already satisfied: googleapis-common-protos<2.0.dev0,>=1.56.2 in /usr/local/lib/python3.10/dist-packages (from google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0dev,>=1.31.5->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (1.59.1)
Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from google-auth<3.0.0dev,>=1.19.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (5.3.1)
Requirement already satisfied: joblib>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.21.3->seqeval->tf-models-official>=2.5.1->object-detection==0.1) (1.3.1)
Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.21.3->seqeval->tf-models-official>=2.5.1->object-detection==0.1) (3.2.0)
Requirement already satisfied: google-auth-oauthlib<1.1,>=0.5 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.14,>=2.13->tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (1.0.0)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.14,>=2.13->tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (3.4.3)
Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.14,>=2.13->tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (0.7.1)
Requirement already satisfied: werkzeug>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.14,>=2.13->tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (2.3.6)
Requirement already satisfied: webencodings in /usr/local/lib/python3.10/dist-packages (from bleach->kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (0.5.1)
Requirement already satisfied: text-unidecode>=1.3 in /usr/local/lib/python3.10/dist-packages (from python-slugify->kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (1.3)
Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from google-auth-oauthlib<1.1,>=0.5->tensorboard<2.14,>=2.13->tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (1.3.1)
Collecting h11<0.15,>=0.13 (from httpcore>=0.17.3->dnspython<3.0.0,>=1.16.0->pymongo<5.0.0,>=3.8.0->apache-beam->object-detection==0.1)
  Downloading h11-0.14.0-py3-none-any.whl (58 kB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 58.3/58.3 kB 7.7 MB/s eta 0:00:00
Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.10/dist-packages (from httpcore>=0.17.3->dnspython<3.0.0,>=1.16.0->pymongo<5.0.0,>=3.8.0->apache-beam->object-detection==0.1) (3.7.1)
Requirement already satisfied: MarkupSafe>=2.1.1 in /usr/local/lib/python3.10/dist-packages (from werkzeug>=1.0.1->tensorboard<2.14,>=2.13->tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (2.1.3)
Requirement already satisfied: exceptiongroup in /usr/local/lib/python3.10/dist-packages (from anyio<5.0,>=3.0->httpcore>=0.17.3->dnspython<3.0.0,>=1.16.0->pymongo<5.0.0,>=3.8.0->apache-beam->object-detection==0.1) (1.1.2)
Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.10/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<1.1,>=0.5->tensorboard<2.14,>=2.13->tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (3.2.2)
Building wheels for collected packages: object-detection, avro-python3, crcmod, dill, seqeval, docopt
  Building wheel for object-detection (setup.py) ... done
  Created wheel for object-detection: filename=object_detection-0.1-py3-none-any.whl size=1697202 sha256=5abf6963ae427a18431cdbe75d3992c0b57752ef79e36b8566231d0fcc3e5e24
  Stored in directory: /tmp/pip-ephem-wheel-cache-ut2sveq1/wheels/fb/c9/43/709f88e66b36649c7a29812ca4f6236f31caed949aabc3e335
  Building wheel for avro-python3 (setup.py) ... done
  Created wheel for avro-python3: filename=avro_python3-1.10.2-py3-none-any.whl size=43994 sha256=26f6841d462fe7dd3097ba23fc563600afeb6935585a7d3eafc063e5e0135bbf
  Stored in directory: /root/.cache/pip/wheels/bc/85/62/6cdd81c56f923946b401cecff38055b94c9b766927f7d8ca82
  Building wheel for crcmod (setup.py) ... done
  Created wheel for crcmod: filename=crcmod-1.7-cp310-cp310-linux_x86_64.whl size=31409 sha256=60402c5ae75753fccc1d47eeb8da09d5e15109cde55b8d00d0125d92460f4939
  Stored in directory: /root/.cache/pip/wheels/85/4c/07/72215c529bd59d67e3dac29711d7aba1b692f543c808ba9e86
  Building wheel for dill (setup.py) ... done
  Created wheel for dill: filename=dill-0.3.1.1-py3-none-any.whl size=78545 sha256=9b8904a75f03283a68e669b80561a2b9f63a58a9f6a4c9598254b8b5955d9a66
  Stored in directory: /root/.cache/pip/wheels/ea/e2/86/64980d90e297e7bf2ce588c2b96e818f5399c515c4bb8a7e4f
  Building wheel for seqeval (setup.py) ... done
  Created wheel for seqeval: filename=seqeval-1.2.2-py3-none-any.whl size=16165 sha256=2a40531e6b3c370703313915f72f3e87f9f9d7448ef59db18bab8b7fb39ed3d8
  Stored in directory: /root/.cache/pip/wheels/1a/67/4a/ad4082dd7dfc30f2abfe4d80a2ed5926a506eb8a972b4767fa
  Building wheel for docopt (setup.py) ... done
  Created wheel for docopt: filename=docopt-0.6.2-py2.py3-none-any.whl size=13707 sha256=d3ff8bcaf5a4ab9e527a20c6d398edfdb6c35c2fe402edd0a492ce020b6b7e5e
  Stored in directory: /root/.cache/pip/wheels/fc/ab/d4/5da2067ac95b36618c629a5f93f809425700506f72c9732fac
Successfully built object-detection avro-python3 crcmod dill seqeval docopt
Installing collected packages: sentencepiece, docopt, crcmod, zstandard, typing-extensions, tensorflow-model-optimization, tensorflow_io, tensorflow-estimator, pyparsing, portalocker, orjson, objsize, keras, immutabledict, h11, fasteners, fastavro, dill, colorama, avro-python3, sacrebleu, httpcore, hdfs, seqeval, lvis, dnspython, tensorboard, pymongo, tensorflow, apache-beam, tensorflow-text, tf-models-official, object-detection
  Attempting uninstall: typing-extensions
    Found existing installation: typing_extensions 4.7.1
    Uninstalling typing_extensions-4.7.1:
      Successfully uninstalled typing_extensions-4.7.1
  Attempting uninstall: tensorflow-estimator
    Found existing installation: tensorflow-estimator 2.12.0
    Uninstalling tensorflow-estimator-2.12.0:
      Successfully uninstalled tensorflow-estimator-2.12.0
  Attempting uninstall: pyparsing
    Found existing installation: pyparsing 3.1.0
    Uninstalling pyparsing-3.1.0:
      Successfully uninstalled pyparsing-3.1.0
  Attempting uninstall: keras
    Found existing installation: keras 2.12.0
    Uninstalling keras-2.12.0:
      Successfully uninstalled keras-2.12.0
  Attempting uninstall: tensorboard
    Found existing installation: tensorboard 2.12.3
    Uninstalling tensorboard-2.12.3:
      Successfully uninstalled tensorboard-2.12.3
  Attempting uninstall: tensorflow
    Found existing installation: tensorflow 2.12.0
    Uninstalling tensorflow-2.12.0:
      Successfully uninstalled tensorflow-2.12.0
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
flax 0.7.0 requires PyYAML>=5.4.1, but you have pyyaml 5.3 which is incompatible.
Successfully installed apache-beam-2.49.0 avro-python3-1.10.2 colorama-0.4.6 crcmod-1.7 dill-0.3.1.1 dnspython-2.4.0 docopt-0.6.2 fastavro-1.8.2 fasteners-0.18 h11-0.14.0 hdfs-2.7.0 httpcore-0.17.3 immutabledict-3.0.0 keras-2.13.1 lvis-0.5.3 object-detection-0.1 objsize-0.6.1 orjson-3.9.2 portalocker-2.7.0 pymongo-4.4.1 pyparsing-2.4.7 sacrebleu-2.2.0 sentencepiece-0.1.99 seqeval-1.2.2 tensorboard-2.13.0 tensorflow-2.13.0 tensorflow-estimator-2.13.0 tensorflow-model-optimization-0.7.5 tensorflow-text-2.13.0 tensorflow_io-0.32.0 tf-models-official-2.13.1 typing-extensions-4.5.0 zstandard-0.21.0
In [ ]:
# Test if the object detection package has been successfully installed by
# running the model-builder unit tests shipped with the API; all tests
# passing confirms the install and protobuf compilation are working.

!python object_detection/builders/model_builder_tf2_test.py
2023-07-24 01:20:09.414712: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 AVX512F FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-07-24 01:20:10.878737: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/__init__.py:98: UserWarning: unable to load libtensorflow_io_plugins.so: unable to open file: libtensorflow_io_plugins.so, from paths: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so']
caused by: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so: undefined symbol: _ZN3tsl6Status12empty_stringB5cxx11Ev']
  warnings.warn(f"unable to load libtensorflow_io_plugins.so: {e}")
/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/__init__.py:104: UserWarning: file system plugins are not loaded: unable to open file: libtensorflow_io.so, from paths: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io.so']
caused by: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io.so: undefined symbol: _ZNK10tensorflow4data11DatasetBase8FinalizeEPNS_15OpKernelContextESt8functionIFN3tsl8StatusOrISt10unique_ptrIS1_NS5_4core15RefCountDeleterEEEEvEE']
  warnings.warn(f"file system plugins are not loaded: {e}")
2023-07-24 01:20:14.369688: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-24 01:20:14.878842: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-24 01:20:14.879200: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
Running tests under Python 3.10.6: /usr/bin/python3
[ RUN      ] ModelBuilderTF2Test.test_create_center_net_deepmac
2023-07-24 01:20:14.894924: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-24 01:20:14.895263: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-24 01:20:14.895512: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-24 01:20:16.437765: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-24 01:20:16.438236: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-24 01:20:16.438587: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-24 01:20:16.438794: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:47] Overriding orig_value setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. Original config value was 0.
2023-07-24 01:20:16.438852: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1639] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 13692 MB memory:  -> device: 0, name: Tesla T4, pci bus id: 0000:00:04.0, compute capability: 7.5
WARNING:tensorflow:`tf.keras.layers.experimental.SyncBatchNormalization` endpoint is deprecated and will be removed in a future release. Please use `tf.keras.layers.BatchNormalization` with parameter `synchronized` set to True.
W0724 01:20:16.482963 138439090729600 batch_normalization.py:1531] `tf.keras.layers.experimental.SyncBatchNormalization` endpoint is deprecated and will be removed in a future release. Please use `tf.keras.layers.BatchNormalization` with parameter `synchronized` set to True.
W0724 01:20:17.449128 138439090729600 model_builder.py:1112] Building experimental DeepMAC meta-arch. Some features may be omitted.
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_center_net_deepmac): 4.14s
I0724 01:20:19.018120 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_center_net_deepmac): 4.14s
[       OK ] ModelBuilderTF2Test.test_create_center_net_deepmac
[ RUN      ] ModelBuilderTF2Test.test_create_center_net_model0 (customize_head_params=True)
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_center_net_model0 (customize_head_params=True)): 3.02s
I0724 01:20:22.042393 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_center_net_model0 (customize_head_params=True)): 3.02s
[       OK ] ModelBuilderTF2Test.test_create_center_net_model0 (customize_head_params=True)
[ RUN      ] ModelBuilderTF2Test.test_create_center_net_model1 (customize_head_params=False)
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_center_net_model1 (customize_head_params=False)): 1.38s
I0724 01:20:23.419921 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_center_net_model1 (customize_head_params=False)): 1.38s
[       OK ] ModelBuilderTF2Test.test_create_center_net_model1 (customize_head_params=False)
[ RUN      ] ModelBuilderTF2Test.test_create_center_net_model_from_keypoints
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_center_net_model_from_keypoints): 1.5s
I0724 01:20:24.917790 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_center_net_model_from_keypoints): 1.5s
[       OK ] ModelBuilderTF2Test.test_create_center_net_model_from_keypoints
[ RUN      ] ModelBuilderTF2Test.test_create_center_net_model_mobilenet
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_center_net_model_mobilenet): 7.61s
I0724 01:20:32.529135 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_center_net_model_mobilenet): 7.61s
[       OK ] ModelBuilderTF2Test.test_create_center_net_model_mobilenet
[ RUN      ] ModelBuilderTF2Test.test_create_experimental_model
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_experimental_model): 0.0s
I0724 01:20:32.540436 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_experimental_model): 0.0s
[       OK ] ModelBuilderTF2Test.test_create_experimental_model
[ RUN      ] ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature0 (True)
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature0 (True)): 0.05s
I0724 01:20:32.592100 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature0 (True)): 0.05s
[       OK ] ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature0 (True)
[ RUN      ] ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature1 (False)
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature1 (False)): 0.05s
I0724 01:20:32.638619 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature1 (False)): 0.05s
[       OK ] ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature1 (False)
[ RUN      ] ModelBuilderTF2Test.test_create_faster_rcnn_model_from_config_with_example_miner
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_model_from_config_with_example_miner): 0.04s
I0724 01:20:32.679755 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_model_from_config_with_example_miner): 0.04s
[       OK ] ModelBuilderTF2Test.test_create_faster_rcnn_model_from_config_with_example_miner
[ RUN      ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_with_matmul
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_with_matmul): 0.34s
I0724 01:20:33.025096 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_with_matmul): 0.34s
[       OK ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_with_matmul
[ RUN      ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_without_matmul
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_without_matmul): 0.32s
I0724 01:20:33.345795 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_without_matmul): 0.32s
[       OK ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_without_matmul
[ RUN      ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_with_matmul
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_with_matmul): 0.3s
I0724 01:20:33.644825 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_with_matmul): 0.3s
[       OK ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_with_matmul
[ RUN      ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_without_matmul
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_without_matmul): 0.37s
I0724 01:20:34.017995 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_without_matmul): 0.37s
[       OK ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_without_matmul
[ RUN      ] ModelBuilderTF2Test.test_create_rfcn_model_from_config
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_rfcn_model_from_config): 0.31s
I0724 01:20:34.331871 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_rfcn_model_from_config): 0.31s
[       OK ] ModelBuilderTF2Test.test_create_rfcn_model_from_config
[ RUN      ] ModelBuilderTF2Test.test_create_ssd_fpn_model_from_config
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_ssd_fpn_model_from_config): 0.08s
I0724 01:20:34.415721 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_ssd_fpn_model_from_config): 0.08s
[       OK ] ModelBuilderTF2Test.test_create_ssd_fpn_model_from_config
[ RUN      ] ModelBuilderTF2Test.test_create_ssd_models_from_config
I0724 01:20:34.874937 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:150] EfficientDet EfficientNet backbone version: efficientnet-b0
I0724 01:20:34.875204 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:152] EfficientDet BiFPN num filters: 64
I0724 01:20:34.875304 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:153] EfficientDet BiFPN num iterations: 3
I0724 01:20:34.880167 138439090729600 efficientnet_model.py:143] round_filter input=32 output=32
I0724 01:20:34.946235 138439090729600 efficientnet_model.py:143] round_filter input=32 output=32
I0724 01:20:34.946493 138439090729600 efficientnet_model.py:143] round_filter input=16 output=16
I0724 01:20:35.184557 138439090729600 efficientnet_model.py:143] round_filter input=16 output=16
I0724 01:20:35.184774 138439090729600 efficientnet_model.py:143] round_filter input=24 output=24
I0724 01:20:35.723332 138439090729600 efficientnet_model.py:143] round_filter input=24 output=24
I0724 01:20:35.723565 138439090729600 efficientnet_model.py:143] round_filter input=40 output=40
I0724 01:20:36.212841 138439090729600 efficientnet_model.py:143] round_filter input=40 output=40
I0724 01:20:36.213113 138439090729600 efficientnet_model.py:143] round_filter input=80 output=80
I0724 01:20:37.076188 138439090729600 efficientnet_model.py:143] round_filter input=80 output=80
I0724 01:20:37.080979 138439090729600 efficientnet_model.py:143] round_filter input=112 output=112
I0724 01:20:37.975229 138439090729600 efficientnet_model.py:143] round_filter input=112 output=112
I0724 01:20:37.975496 138439090729600 efficientnet_model.py:143] round_filter input=192 output=192
I0724 01:20:38.729567 138439090729600 efficientnet_model.py:143] round_filter input=192 output=192
I0724 01:20:38.729771 138439090729600 efficientnet_model.py:143] round_filter input=320 output=320
I0724 01:20:38.887799 138439090729600 efficientnet_model.py:143] round_filter input=1280 output=1280
I0724 01:20:38.946954 138439090729600 efficientnet_model.py:453] Building model efficientnet with params ModelConfig(width_coefficient=1.0, depth_coefficient=1.0, resolution=224, dropout_rate=0.2, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')
I0724 01:20:39.014205 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:150] EfficientDet EfficientNet backbone version: efficientnet-b1
I0724 01:20:39.014434 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:152] EfficientDet BiFPN num filters: 88
I0724 01:20:39.014528 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:153] EfficientDet BiFPN num iterations: 4
I0724 01:20:39.018285 138439090729600 efficientnet_model.py:143] round_filter input=32 output=32
I0724 01:20:39.041238 138439090729600 efficientnet_model.py:143] round_filter input=32 output=32
I0724 01:20:39.041448 138439090729600 efficientnet_model.py:143] round_filter input=16 output=16
I0724 01:20:39.226057 138439090729600 efficientnet_model.py:143] round_filter input=16 output=16
I0724 01:20:39.226231 138439090729600 efficientnet_model.py:143] round_filter input=24 output=24
I0724 01:20:39.568785 138439090729600 efficientnet_model.py:143] round_filter input=24 output=24
I0724 01:20:39.568971 138439090729600 efficientnet_model.py:143] round_filter input=40 output=40
I0724 01:20:40.143369 138439090729600 efficientnet_model.py:143] round_filter input=40 output=40
I0724 01:20:40.143538 138439090729600 efficientnet_model.py:143] round_filter input=80 output=80
I0724 01:20:40.779392 138439090729600 efficientnet_model.py:143] round_filter input=80 output=80
I0724 01:20:40.779609 138439090729600 efficientnet_model.py:143] round_filter input=112 output=112
I0724 01:20:41.437864 138439090729600 efficientnet_model.py:143] round_filter input=112 output=112
I0724 01:20:41.438087 138439090729600 efficientnet_model.py:143] round_filter input=192 output=192
I0724 01:20:42.257851 138439090729600 efficientnet_model.py:143] round_filter input=192 output=192
I0724 01:20:42.258109 138439090729600 efficientnet_model.py:143] round_filter input=320 output=320
I0724 01:20:42.630148 138439090729600 efficientnet_model.py:143] round_filter input=1280 output=1280
I0724 01:20:42.696536 138439090729600 efficientnet_model.py:453] Building model efficientnet with params ModelConfig(width_coefficient=1.0, depth_coefficient=1.1, resolution=240, dropout_rate=0.2, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')
I0724 01:20:42.836371 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:150] EfficientDet EfficientNet backbone version: efficientnet-b2
I0724 01:20:42.836596 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:152] EfficientDet BiFPN num filters: 112
I0724 01:20:42.836678 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:153] EfficientDet BiFPN num iterations: 5
I0724 01:20:42.840301 138439090729600 efficientnet_model.py:143] round_filter input=32 output=32
I0724 01:20:42.872278 138439090729600 efficientnet_model.py:143] round_filter input=32 output=32
I0724 01:20:42.872493 138439090729600 efficientnet_model.py:143] round_filter input=16 output=16
I0724 01:20:43.152460 138439090729600 efficientnet_model.py:143] round_filter input=16 output=16
I0724 01:20:43.152678 138439090729600 efficientnet_model.py:143] round_filter input=24 output=24
I0724 01:20:43.687868 138439090729600 efficientnet_model.py:143] round_filter input=24 output=24
I0724 01:20:43.688128 138439090729600 efficientnet_model.py:143] round_filter input=40 output=48
I0724 01:20:44.245640 138439090729600 efficientnet_model.py:143] round_filter input=40 output=48
I0724 01:20:44.245877 138439090729600 efficientnet_model.py:143] round_filter input=80 output=88
I0724 01:20:44.970475 138439090729600 efficientnet_model.py:143] round_filter input=80 output=88
I0724 01:20:44.970719 138439090729600 efficientnet_model.py:143] round_filter input=112 output=120
I0724 01:20:45.824649 138439090729600 efficientnet_model.py:143] round_filter input=112 output=120
I0724 01:20:45.825091 138439090729600 efficientnet_model.py:143] round_filter input=192 output=208
I0724 01:20:47.041252 138439090729600 efficientnet_model.py:143] round_filter input=192 output=208
I0724 01:20:47.041499 138439090729600 efficientnet_model.py:143] round_filter input=320 output=352
I0724 01:20:47.585009 138439090729600 efficientnet_model.py:143] round_filter input=1280 output=1408
I0724 01:20:47.660436 138439090729600 efficientnet_model.py:453] Building model efficientnet with params ModelConfig(width_coefficient=1.1, depth_coefficient=1.2, resolution=260, dropout_rate=0.3, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')
I0724 01:20:47.839485 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:150] EfficientDet EfficientNet backbone version: efficientnet-b3
I0724 01:20:47.839702 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:152] EfficientDet BiFPN num filters: 160
I0724 01:20:47.839803 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:153] EfficientDet BiFPN num iterations: 6
I0724 01:20:47.844650 138439090729600 efficientnet_model.py:143] round_filter input=32 output=40
I0724 01:20:47.885457 138439090729600 efficientnet_model.py:143] round_filter input=32 output=40
I0724 01:20:47.885868 138439090729600 efficientnet_model.py:143] round_filter input=16 output=24
I0724 01:20:48.249775 138439090729600 efficientnet_model.py:143] round_filter input=16 output=24
I0724 01:20:48.250020 138439090729600 efficientnet_model.py:143] round_filter input=24 output=32
I0724 01:20:48.806297 138439090729600 efficientnet_model.py:143] round_filter input=24 output=32
I0724 01:20:48.806748 138439090729600 efficientnet_model.py:143] round_filter input=40 output=48
I0724 01:20:49.339481 138439090729600 efficientnet_model.py:143] round_filter input=40 output=48
I0724 01:20:49.339864 138439090729600 efficientnet_model.py:143] round_filter input=80 output=96
I0724 01:20:50.273162 138439090729600 efficientnet_model.py:143] round_filter input=80 output=96
I0724 01:20:50.273422 138439090729600 efficientnet_model.py:143] round_filter input=112 output=136
I0724 01:20:51.242760 138439090729600 efficientnet_model.py:143] round_filter input=112 output=136
I0724 01:20:51.243004 138439090729600 efficientnet_model.py:143] round_filter input=192 output=232
I0724 01:20:52.427350 138439090729600 efficientnet_model.py:143] round_filter input=192 output=232
I0724 01:20:52.427572 138439090729600 efficientnet_model.py:143] round_filter input=320 output=384
I0724 01:20:52.842986 138439090729600 efficientnet_model.py:143] round_filter input=1280 output=1536
I0724 01:20:52.934597 138439090729600 efficientnet_model.py:453] Building model efficientnet with params ModelConfig(width_coefficient=1.2, depth_coefficient=1.4, resolution=300, dropout_rate=0.3, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')
I0724 01:20:53.127321 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:150] EfficientDet EfficientNet backbone version: efficientnet-b4
I0724 01:20:53.127560 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:152] EfficientDet BiFPN num filters: 224
I0724 01:20:53.127653 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:153] EfficientDet BiFPN num iterations: 7
I0724 01:20:53.131211 138439090729600 efficientnet_model.py:143] round_filter input=32 output=48
I0724 01:20:53.173674 138439090729600 efficientnet_model.py:143] round_filter input=32 output=48
I0724 01:20:53.173883 138439090729600 efficientnet_model.py:143] round_filter input=16 output=24
I0724 01:20:53.454920 138439090729600 efficientnet_model.py:143] round_filter input=16 output=24
I0724 01:20:53.455165 138439090729600 efficientnet_model.py:143] round_filter input=24 output=32
I0724 01:20:54.162480 138439090729600 efficientnet_model.py:143] round_filter input=24 output=32
I0724 01:20:54.162712 138439090729600 efficientnet_model.py:143] round_filter input=40 output=56
I0724 01:20:55.319935 138439090729600 efficientnet_model.py:143] round_filter input=40 output=56
I0724 01:20:55.320178 138439090729600 efficientnet_model.py:143] round_filter input=80 output=112
I0724 01:20:56.447648 138439090729600 efficientnet_model.py:143] round_filter input=80 output=112
I0724 01:20:56.447885 138439090729600 efficientnet_model.py:143] round_filter input=112 output=160
I0724 01:20:57.513421 138439090729600 efficientnet_model.py:143] round_filter input=112 output=160
I0724 01:20:57.513677 138439090729600 efficientnet_model.py:143] round_filter input=192 output=272
I0724 01:20:58.932376 138439090729600 efficientnet_model.py:143] round_filter input=192 output=272
I0724 01:20:58.932624 138439090729600 efficientnet_model.py:143] round_filter input=320 output=448
I0724 01:20:59.300993 138439090729600 efficientnet_model.py:143] round_filter input=1280 output=1792
I0724 01:20:59.377303 138439090729600 efficientnet_model.py:453] Building model efficientnet with params ModelConfig(width_coefficient=1.4, depth_coefficient=1.8, resolution=380, dropout_rate=0.4, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')
I0724 01:20:59.550939 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:150] EfficientDet EfficientNet backbone version: efficientnet-b5
I0724 01:20:59.551196 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:152] EfficientDet BiFPN num filters: 288
I0724 01:20:59.551311 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:153] EfficientDet BiFPN num iterations: 7
I0724 01:20:59.555185 138439090729600 efficientnet_model.py:143] round_filter input=32 output=48
I0724 01:20:59.596613 138439090729600 efficientnet_model.py:143] round_filter input=32 output=48
I0724 01:20:59.599427 138439090729600 efficientnet_model.py:143] round_filter input=16 output=24
I0724 01:21:00.019097 138439090729600 efficientnet_model.py:143] round_filter input=16 output=24
I0724 01:21:00.019284 138439090729600 efficientnet_model.py:143] round_filter input=24 output=40
I0724 01:21:00.580674 138439090729600 efficientnet_model.py:143] round_filter input=24 output=40
I0724 01:21:00.580848 138439090729600 efficientnet_model.py:143] round_filter input=40 output=64
I0724 01:21:01.159868 138439090729600 efficientnet_model.py:143] round_filter input=40 output=64
I0724 01:21:01.160073 138439090729600 efficientnet_model.py:143] round_filter input=80 output=128
I0724 01:21:01.987677 138439090729600 efficientnet_model.py:143] round_filter input=80 output=128
I0724 01:21:01.987865 138439090729600 efficientnet_model.py:143] round_filter input=112 output=176
I0724 01:21:02.814872 138439090729600 efficientnet_model.py:143] round_filter input=112 output=176
I0724 01:21:02.815062 138439090729600 efficientnet_model.py:143] round_filter input=192 output=304
I0724 01:21:04.193312 138439090729600 efficientnet_model.py:143] round_filter input=192 output=304
I0724 01:21:04.193557 138439090729600 efficientnet_model.py:143] round_filter input=320 output=512
I0724 01:21:04.723359 138439090729600 efficientnet_model.py:143] round_filter input=1280 output=2048
I0724 01:21:04.794101 138439090729600 efficientnet_model.py:453] Building model efficientnet with params ModelConfig(width_coefficient=1.6, depth_coefficient=2.2, resolution=456, dropout_rate=0.4, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')
I0724 01:21:05.000462 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:150] EfficientDet EfficientNet backbone version: efficientnet-b6
I0724 01:21:05.000686 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:152] EfficientDet BiFPN num filters: 384
I0724 01:21:05.000776 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:153] EfficientDet BiFPN num iterations: 8
I0724 01:21:05.004499 138439090729600 efficientnet_model.py:143] round_filter input=32 output=56
I0724 01:21:05.049827 138439090729600 efficientnet_model.py:143] round_filter input=32 output=56
I0724 01:21:05.050025 138439090729600 efficientnet_model.py:143] round_filter input=16 output=32
I0724 01:21:05.456311 138439090729600 efficientnet_model.py:143] round_filter input=16 output=32
I0724 01:21:05.456551 138439090729600 efficientnet_model.py:143] round_filter input=24 output=40
I0724 01:21:06.493755 138439090729600 efficientnet_model.py:143] round_filter input=24 output=40
I0724 01:21:06.494000 138439090729600 efficientnet_model.py:143] round_filter input=40 output=72
I0724 01:21:07.558255 138439090729600 efficientnet_model.py:143] round_filter input=40 output=72
I0724 01:21:07.558481 138439090729600 efficientnet_model.py:143] round_filter input=80 output=144
I0724 01:21:09.404932 138439090729600 efficientnet_model.py:143] round_filter input=80 output=144
I0724 01:21:09.405181 138439090729600 efficientnet_model.py:143] round_filter input=112 output=200
I0724 01:21:10.861875 138439090729600 efficientnet_model.py:143] round_filter input=112 output=200
I0724 01:21:10.862163 138439090729600 efficientnet_model.py:143] round_filter input=192 output=344
I0724 01:21:12.819561 138439090729600 efficientnet_model.py:143] round_filter input=192 output=344
I0724 01:21:12.819787 138439090729600 efficientnet_model.py:143] round_filter input=320 output=576
I0724 01:21:13.359139 138439090729600 efficientnet_model.py:143] round_filter input=1280 output=2304
I0724 01:21:13.431503 138439090729600 efficientnet_model.py:453] Building model efficientnet with params ModelConfig(width_coefficient=1.8, depth_coefficient=2.6, resolution=528, dropout_rate=0.5, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')
I0724 01:21:13.667789 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:150] EfficientDet EfficientNet backbone version: efficientnet-b7
I0724 01:21:13.668014 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:152] EfficientDet BiFPN num filters: 384
I0724 01:21:13.668128 138439090729600 ssd_efficientnet_bifpn_feature_extractor.py:153] EfficientDet BiFPN num iterations: 8
I0724 01:21:13.671872 138439090729600 efficientnet_model.py:143] round_filter input=32 output=64
I0724 01:21:13.710432 138439090729600 efficientnet_model.py:143] round_filter input=32 output=64
I0724 01:21:13.710645 138439090729600 efficientnet_model.py:143] round_filter input=16 output=32
I0724 01:21:14.285712 138439090729600 efficientnet_model.py:143] round_filter input=16 output=32
I0724 01:21:14.285944 138439090729600 efficientnet_model.py:143] round_filter input=24 output=48
I0724 01:21:15.506693 138439090729600 efficientnet_model.py:143] round_filter input=24 output=48
I0724 01:21:15.506936 138439090729600 efficientnet_model.py:143] round_filter input=40 output=80
I0724 01:21:16.483685 138439090729600 efficientnet_model.py:143] round_filter input=40 output=80
I0724 01:21:16.483895 138439090729600 efficientnet_model.py:143] round_filter input=80 output=160
I0724 01:21:17.652847 138439090729600 efficientnet_model.py:143] round_filter input=80 output=160
I0724 01:21:17.653055 138439090729600 efficientnet_model.py:143] round_filter input=112 output=224
I0724 01:21:18.817519 138439090729600 efficientnet_model.py:143] round_filter input=112 output=224
I0724 01:21:18.817731 138439090729600 efficientnet_model.py:143] round_filter input=192 output=384
I0724 01:21:20.315922 138439090729600 efficientnet_model.py:143] round_filter input=192 output=384
I0724 01:21:20.316114 138439090729600 efficientnet_model.py:143] round_filter input=320 output=640
I0724 01:21:20.790757 138439090729600 efficientnet_model.py:143] round_filter input=1280 output=2560
I0724 01:21:20.841262 138439090729600 efficientnet_model.py:453] Building model efficientnet with params ModelConfig(width_coefficient=2.0, depth_coefficient=3.1, resolution=600, dropout_rate=0.5, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_ssd_models_from_config): 46.57s
I0724 01:21:20.987147 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_ssd_models_from_config): 46.57s
[       OK ] ModelBuilderTF2Test.test_create_ssd_models_from_config
[ RUN      ] ModelBuilderTF2Test.test_invalid_faster_rcnn_batchnorm_update
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_invalid_faster_rcnn_batchnorm_update): 0.0s
I0724 01:21:21.019313 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_invalid_faster_rcnn_batchnorm_update): 0.0s
[       OK ] ModelBuilderTF2Test.test_invalid_faster_rcnn_batchnorm_update
[ RUN      ] ModelBuilderTF2Test.test_invalid_first_stage_nms_iou_threshold
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_invalid_first_stage_nms_iou_threshold): 0.0s
I0724 01:21:21.021357 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_invalid_first_stage_nms_iou_threshold): 0.0s
[       OK ] ModelBuilderTF2Test.test_invalid_first_stage_nms_iou_threshold
[ RUN      ] ModelBuilderTF2Test.test_invalid_model_config_proto
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_invalid_model_config_proto): 0.0s
I0724 01:21:21.021931 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_invalid_model_config_proto): 0.0s
[       OK ] ModelBuilderTF2Test.test_invalid_model_config_proto
[ RUN      ] ModelBuilderTF2Test.test_invalid_second_stage_batch_size
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_invalid_second_stage_batch_size): 0.0s
I0724 01:21:21.023600 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_invalid_second_stage_batch_size): 0.0s
[       OK ] ModelBuilderTF2Test.test_invalid_second_stage_batch_size
[ RUN      ] ModelBuilderTF2Test.test_session
[  SKIPPED ] ModelBuilderTF2Test.test_session
[ RUN      ] ModelBuilderTF2Test.test_unknown_faster_rcnn_feature_extractor
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_unknown_faster_rcnn_feature_extractor): 0.0s
I0724 01:21:21.025001 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_unknown_faster_rcnn_feature_extractor): 0.0s
[       OK ] ModelBuilderTF2Test.test_unknown_faster_rcnn_feature_extractor
[ RUN      ] ModelBuilderTF2Test.test_unknown_meta_architecture
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_unknown_meta_architecture): 0.0s
I0724 01:21:21.025490 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_unknown_meta_architecture): 0.0s
[       OK ] ModelBuilderTF2Test.test_unknown_meta_architecture
[ RUN      ] ModelBuilderTF2Test.test_unknown_ssd_feature_extractor
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_unknown_ssd_feature_extractor): 0.0s
I0724 01:21:21.026639 138439090729600 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_unknown_ssd_feature_extractor): 0.0s
[       OK ] ModelBuilderTF2Test.test_unknown_ssd_feature_extractor
----------------------------------------------------------------------
Ran 24 tests in 66.144s

OK (skipped=1)

Coco API¶

In [ ]:
# Go back to Tensorflow/

%cd ..
%cd ..
/content/Tensorflow/models
/content/Tensorflow
In [ ]:
# Clone and install CocoAPI

!git clone https://github.com/cocodataset/cocoapi.git
%cd cocoapi/PythonAPI
!make
%cp -r pycocotools /content/Tensorflow/models/research/
Cloning into 'cocoapi'...
remote: Enumerating objects: 975, done.
remote: Total 975 (delta 0), reused 0 (delta 0), pack-reused 975
Receiving objects: 100% (975/975), 11.72 MiB | 3.61 MiB/s, done.
Resolving deltas: 100% (576/576), done.
/content/Tensorflow/cocoapi/PythonAPI
python setup.py build_ext --inplace
running build_ext
cythoning pycocotools/_mask.pyx to pycocotools/_mask.c
/usr/local/lib/python3.10/dist-packages/Cython/Compiler/Main.py:369: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! File: /content/Tensorflow/cocoapi/PythonAPI/pycocotools/_mask.pyx
  tree = Parsing.p_module(s, pxd, full_module_name)
building 'pycocotools._mask' extension
creating build
creating build/common
creating build/temp.linux-x86_64-cpython-310
creating build/temp.linux-x86_64-cpython-310/pycocotools
x86_64-linux-gnu-gcc -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -g -fwrapv -O2 -fPIC -I/usr/local/lib/python3.10/dist-packages/numpy/core/include -I../common -I/usr/include/python3.10 -c ../common/maskApi.c -o build/temp.linux-x86_64-cpython-310/../common/maskApi.o -Wno-cpp -Wno-unused-function -std=c99
../common/maskApi.c: In function ‘rleDecode’:
../common/maskApi.c:46:7: warning: this ‘for’ clause does not guard... []8;;https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html#index-Wmisleading-indentation-Wmisleading-indentation]8;;]
   46 |       for( k=0; k<R[i].cnts[j]; k++ ) *(M++)=v; v=!v; }}
      |       ^~~
../common/maskApi.c:46:49: note: ...this statement, but the latter is misleadingly indented as if it were guarded by the ‘for’
   46 |       for( k=0; k<R[i].cnts[j]; k++ ) *(M++)=v; v=!v; }}
      |                                                 ^
../common/maskApi.c: In function ‘rleFrPoly’:
../common/maskApi.c:166:3: warning: this ‘for’ clause does not guard... []8;;https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html#index-Wmisleading-indentation-Wmisleading-indentation]8;;]
  166 |   for(j=0; j<k; j++) x[j]=(int)(scale*xy[j*2+0]+.5); x[k]=x[0];
      |   ^~~
../common/maskApi.c:166:54: note: ...this statement, but the latter is misleadingly indented as if it were guarded by the ‘for’
  166 |   for(j=0; j<k; j++) x[j]=(int)(scale*xy[j*2+0]+.5); x[k]=x[0];
      |                                                      ^
../common/maskApi.c:167:3: warning: this ‘for’ clause does not guard... []8;;https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html#index-Wmisleading-indentation-Wmisleading-indentation]8;;]
  167 |   for(j=0; j<k; j++) y[j]=(int)(scale*xy[j*2+1]+.5); y[k]=y[0];
      |   ^~~
../common/maskApi.c:167:54: note: ...this statement, but the latter is misleadingly indented as if it were guarded by the ‘for’
  167 |   for(j=0; j<k; j++) y[j]=(int)(scale*xy[j*2+1]+.5); y[k]=y[0];
      |                                                      ^
../common/maskApi.c: In function ‘rleToString’:
../common/maskApi.c:212:7: warning: this ‘if’ clause does not guard... []8;;https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html#index-Wmisleading-indentation-Wmisleading-indentation]8;;]
  212 |       if(more) c |= 0x20; c+=48; s[p++]=c;
      |       ^~
../common/maskApi.c:212:27: note: ...this statement, but the latter is misleadingly indented as if it were guarded by the ‘if’
  212 |       if(more) c |= 0x20; c+=48; s[p++]=c;
      |                           ^
../common/maskApi.c: In function ‘rleFrString’:
../common/maskApi.c:220:3: warning: this ‘while’ clause does not guard... []8;;https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html#index-Wmisleading-indentation-Wmisleading-indentation]8;;]
  220 |   while( s[m] ) m++; cnts=malloc(sizeof(uint)*m); m=0;
      |   ^~~~~
../common/maskApi.c:220:22: note: ...this statement, but the latter is misleadingly indented as if it were guarded by the ‘while’
  220 |   while( s[m] ) m++; cnts=malloc(sizeof(uint)*m); m=0;
      |                      ^~~~
../common/maskApi.c:228:5: warning: this ‘if’ clause does not guard... []8;;https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html#index-Wmisleading-indentation-Wmisleading-indentation]8;;]
  228 |     if(m>2) x+=(long) cnts[m-2]; cnts[m++]=(uint) x;
      |     ^~
../common/maskApi.c:228:34: note: ...this statement, but the latter is misleadingly indented as if it were guarded by the ‘if’
  228 |     if(m>2) x+=(long) cnts[m-2]; cnts[m++]=(uint) x;
      |                                  ^~~~
x86_64-linux-gnu-gcc -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -g -fwrapv -O2 -fPIC -I/usr/local/lib/python3.10/dist-packages/numpy/core/include -I../common -I/usr/include/python3.10 -c pycocotools/_mask.c -o build/temp.linux-x86_64-cpython-310/pycocotools/_mask.o -Wno-cpp -Wno-unused-function -std=c99
creating build/lib.linux-x86_64-cpython-310
creating build/lib.linux-x86_64-cpython-310/pycocotools
x86_64-linux-gnu-gcc -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -g -fwrapv -O2 build/temp.linux-x86_64-cpython-310/../common/maskApi.o build/temp.linux-x86_64-cpython-310/pycocotools/_mask.o -L/usr/lib/x86_64-linux-gnu -o build/lib.linux-x86_64-cpython-310/pycocotools/_mask.cpython-310-x86_64-linux-gnu.so
copying build/lib.linux-x86_64-cpython-310/pycocotools/_mask.cpython-310-x86_64-linux-gnu.so -> pycocotools
rm -rf build

Training data¶

The dataset contains 4 categories:

- Traffic Light
- Stop
- Speed Limit
- Crosswalk

More details are provided at the annotation section.

In [ ]:
# Go back to Tensorflow/

%cd ..
%cd ..
/content/Tensorflow/cocoapi
/content/Tensorflow
In [ ]:
# Create new directory Tensorflow/data/

if not os.path.exists("data"):
    os.mkdir("data")

# Download dataset to Tensorflow/data/

%cd data
!gdown "https://drive.google.com/uc?export=download&id=1DgDd-PHRYb-y0FGDDXHKbi6ZaGoaTimB"

with ZipFile("archive.zip") as zipfile:
    zipfile.extractall()
!rm "archive.zip"
/content/Tensorflow/data
Downloading...
From: https://drive.google.com/uc?export=download&id=1DgDd-PHRYb-y0FGDDXHKbi6ZaGoaTimB
To: /content/Tensorflow/data/archive.zip
100% 229M/229M [00:02<00:00, 108MB/s] 

Split to train, valid and test sets¶

In [ ]:
# Still in Tensorflow/data/, create train and valid directories.
# os.makedirs(..., exist_ok=True) is idempotent, so re-running the cell
# (or the whole notebook) cannot raise FileExistsError.
for split_dir in ("train", "valid"):
    os.makedirs(split_dir, exist_ok=True)
In [ ]:
import numpy as np

# Fixed seed so the train/valid split is reproducible across runs
np.random.seed(42)
all_pngs = os.listdir("./images")

# train:valid ratio is 8:2
train_size = int(len(all_pngs) * .8)

# Prepare lists of train and validation images plus their annotation XMLs.
# os.path.splitext is safer than slicing off a fixed 4-character suffix:
# it works for any extension length, not just ".png".
train_pngs = np.random.choice(all_pngs, train_size, replace=False)
train_xmls = [os.path.splitext(png)[0] + '.xml' for png in train_pngs]
valid_pngs = np.setdiff1d(all_pngs, train_pngs)
valid_xmls = [os.path.splitext(png)[0] + '.xml' for png in valid_pngs]

len(all_pngs), len(train_pngs), len(valid_pngs)
Out[ ]:
(877, 701, 176)
In [ ]:
import shutil

# Copy each split's images and matching annotation XMLs into its directory:
# images come from Tensorflow/data/images/, annotations from
# Tensorflow/data/annotations/; both land in train/ or valid/.
for destination, pngs, xmls in (("train/", train_pngs, train_xmls),
                                ("valid/", valid_pngs, valid_xmls)):
    for png in pngs:
        shutil.copy("images/" + png, destination)
    for xml in xmls:
        shutil.copy("annotations/" + xml, destination)

Create label map¶

In [ ]:
# Go back to Tensorflow/

%cd ..
/content/Tensorflow
In [ ]:
# Labels: the four traffic-sign categories and their integer ids
labels = [
    {'name': 'trafficlight', 'id': 1},
    {'name': 'stop', 'id': 2},
    {'name': 'speedlimit', 'id': 3},
    {'name': 'crosswalk', 'id': 4},
]

# Create directory to store the label map file: Tensorflow/training/annotations/
for directory in ("training", "training/annotations"):
    if not os.path.exists(directory):
        os.mkdir(directory)

# Write the label map in pbtxt format, one `item { ... }` entry per label
with open("training/annotations/label_map.pbtxt", 'w') as f:
    for label in labels:
        f.write('item { \n'
                f"\tname:'{label['name']}'\n"
                f"\tid:{label['id']}\n"
                '}\n')

TFRecord¶

In [ ]:
# Create new directory to store TFRecord generation script:
# Tensorflow/scripts/preprocessing/. A single os.makedirs call creates the
# intermediate "scripts" directory as well, and exist_ok makes the cell
# safe to re-run.
os.makedirs("scripts/preprocessing", exist_ok=True)
In [ ]:
!wget https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/_downloads/da4babe668a8afb093cc7776d7e630f3/generate_tfrecord.py -P scripts/preprocessing
--2023-07-24 01:21:51--  https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/_downloads/da4babe668a8afb093cc7776d7e630f3/generate_tfrecord.py
Resolving tensorflow-object-detection-api-tutorial.readthedocs.io (tensorflow-object-detection-api-tutorial.readthedocs.io)... 104.17.33.82, 104.17.32.82, 2606:4700::6811:2152, ...
Connecting to tensorflow-object-detection-api-tutorial.readthedocs.io (tensorflow-object-detection-api-tutorial.readthedocs.io)|104.17.33.82|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 6410 (6.3K) [text/x-python]
Saving to: ‘scripts/preprocessing/generate_tfrecord.py’

generate_tfrecord.p 100%[===================>]   6.26K  --.-KB/s    in 0s      

2023-07-24 01:21:51 (28.7 MB/s) - ‘scripts/preprocessing/generate_tfrecord.py’ saved [6410/6410]

In [ ]:
# Generate TFRecord files by running the script in Tensorflow/scripts/ and store the generated file in Tensorflow/training/annotations/

%cd scripts/preprocessing
!python generate_tfrecord.py -x /content/Tensorflow/data/train -l /content/Tensorflow/training/annotations/label_map.pbtxt -o /content/Tensorflow/training/annotations/train.record
!python generate_tfrecord.py -x /content/Tensorflow/data/valid -l /content/Tensorflow/training/annotations/label_map.pbtxt -o /content/Tensorflow/training//annotations/valid.record
/content/Tensorflow/scripts/preprocessing
Successfully created the TFRecord file: /content/Tensorflow/training/annotations/train.record
Successfully created the TFRecord file: /content/Tensorflow/training//annotations/valid.record

Pretrained model¶

In [ ]:
# go back to Tensorflow/

%cd ..
%cd ..
/content/Tensorflow/scripts
/content/Tensorflow
In [ ]:
# Create new directory to store pretrained model: Tensorflow/pre-trained-models/

if not os.path.exists("pre-trained-models"):
    os.mkdir("pre-trained-models")

# Download pretrained model from model zoo

command = f"wget http://download.tensorflow.org/models/object_detection/tf2/20200711/{MODEL_NAME}.tar.gz -P pre-trained-models"
!{command}

with tarfile.open(f"pre-trained-models/{MODEL_NAME}.tar.gz") as tar:
    tar.extractall("pre-trained-models")

command = f"rm pre-trained-models/{MODEL_NAME}.tar.gz"
!{command}
--2023-07-24 01:22:04--  http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.tar.gz
Resolving download.tensorflow.org (download.tensorflow.org)... 74.125.68.128, 2404:6800:4003:c1a::80
Connecting to download.tensorflow.org (download.tensorflow.org)|74.125.68.128|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 20518283 (20M) [application/x-tar]
Saving to: ‘pre-trained-models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.tar.gz’

ssd_mobilenet_v2_fp 100%[===================>]  19.57M  53.1MB/s    in 0.4s    

2023-07-24 01:22:05 (53.1 MB/s) - ‘pre-trained-models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.tar.gz’ saved [20518283/20518283]

Modify training config¶

In [ ]:
# Create new directory to save training config file: TensorFlow/training/models/<MODEL_NAME>/
# Copy config file from TensorFlow/pre-trained-models/ to TensorFlow/training/models/
#
# Fixed: the original used bare os.mkdir for the model directory, which raises
# FileExistsError when the cell is re-run. os.makedirs with exist_ok=True is
# idempotent and also creates the intermediate directories in one call.
os.makedirs(f"training/models/{MODEL_NAME}", exist_ok=True)

# Path of the editable copy of the pipeline config used for training
PIPELINE_CONFIG = f"/content/Tensorflow/training/models/{MODEL_NAME}/pipeline.config"

shutil.copy(f"/content/Tensorflow/pre-trained-models/{MODEL_NAME}/pipeline.config", PIPELINE_CONFIG)
Out[ ]:
'/content/Tensorflow/training/models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/pipeline.config'
In [ ]:
# Load the training pipeline config into an editable protobuf message

import tensorflow as tf
from object_detection.protos import pipeline_pb2
from google.protobuf import text_format

pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
# Parse the pbtxt file contents directly into the message
with tf.io.gfile.GFile(PIPELINE_CONFIG, "r") as f:
    text_format.Merge(f.read(), pipeline_config)
In [ ]:
pipeline_config
Out[ ]:
model {
  ssd {
    num_classes: 90
    image_resizer {
      fixed_shape_resizer {
        height: 640
        width: 640
      }
    }
    feature_extractor {
      type: "ssd_mobilenet_v2_fpn_keras"
      depth_multiplier: 1.0
      min_depth: 16
      conv_hyperparams {
        regularizer {
          l2_regularizer {
            weight: 3.9999998989515007e-05
          }
        }
        initializer {
          random_normal_initializer {
            mean: 0.0
            stddev: 0.009999999776482582
          }
        }
        activation: RELU_6
        batch_norm {
          decay: 0.996999979019165
          scale: true
          epsilon: 0.0010000000474974513
        }
      }
      use_depthwise: true
      override_base_feature_extractor_hyperparams: true
      fpn {
        min_level: 3
        max_level: 7
        additional_layer_depth: 128
      }
    }
    box_coder {
      faster_rcnn_box_coder {
        y_scale: 10.0
        x_scale: 10.0
        height_scale: 5.0
        width_scale: 5.0
      }
    }
    matcher {
      argmax_matcher {
        matched_threshold: 0.5
        unmatched_threshold: 0.5
        ignore_thresholds: false
        negatives_lower_than_unmatched: true
        force_match_for_each_row: true
        use_matmul_gather: true
      }
    }
    similarity_calculator {
      iou_similarity {
      }
    }
    box_predictor {
      weight_shared_convolutional_box_predictor {
        conv_hyperparams {
          regularizer {
            l2_regularizer {
              weight: 3.9999998989515007e-05
            }
          }
          initializer {
            random_normal_initializer {
              mean: 0.0
              stddev: 0.009999999776482582
            }
          }
          activation: RELU_6
          batch_norm {
            decay: 0.996999979019165
            scale: true
            epsilon: 0.0010000000474974513
          }
        }
        depth: 128
        num_layers_before_predictor: 4
        kernel_size: 3
        class_prediction_bias_init: -4.599999904632568
        share_prediction_tower: true
        use_depthwise: true
      }
    }
    anchor_generator {
      multiscale_anchor_generator {
        min_level: 3
        max_level: 7
        anchor_scale: 4.0
        aspect_ratios: 1.0
        aspect_ratios: 2.0
        aspect_ratios: 0.5
        scales_per_octave: 2
      }
    }
    post_processing {
      batch_non_max_suppression {
        score_threshold: 9.99999993922529e-09
        iou_threshold: 0.6000000238418579
        max_detections_per_class: 100
        max_total_detections: 100
        use_static_shapes: false
      }
      score_converter: SIGMOID
    }
    normalize_loss_by_num_matches: true
    loss {
      localization_loss {
        weighted_smooth_l1 {
        }
      }
      classification_loss {
        weighted_sigmoid_focal {
          gamma: 2.0
          alpha: 0.25
        }
      }
      classification_weight: 1.0
      localization_weight: 1.0
    }
    encode_background_as_zeros: true
    normalize_loc_loss_by_codesize: true
    inplace_batchnorm_update: true
    freeze_batchnorm: false
  }
}
train_config {
  batch_size: 128
  data_augmentation_options {
    random_horizontal_flip {
    }
  }
  data_augmentation_options {
    random_crop_image {
      min_object_covered: 0.0
      min_aspect_ratio: 0.75
      max_aspect_ratio: 3.0
      min_area: 0.75
      max_area: 1.0
      overlap_thresh: 0.0
    }
  }
  sync_replicas: true
  optimizer {
    momentum_optimizer {
      learning_rate {
        cosine_decay_learning_rate {
          learning_rate_base: 0.07999999821186066
          total_steps: 50000
          warmup_learning_rate: 0.026666000485420227
          warmup_steps: 1000
        }
      }
      momentum_optimizer_value: 0.8999999761581421
    }
    use_moving_average: false
  }
  fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED"
  num_steps: 50000
  startup_delay_steps: 0.0
  replicas_to_aggregate: 8
  max_number_of_boxes: 100
  unpad_groundtruth_tensors: false
  fine_tune_checkpoint_type: "classification"
  fine_tune_checkpoint_version: V2
}
train_input_reader {
  label_map_path: "PATH_TO_BE_CONFIGURED"
  tf_record_input_reader {
    input_path: "PATH_TO_BE_CONFIGURED"
  }
}
eval_config {
  metrics_set: "coco_detection_metrics"
  use_moving_averages: false
}
eval_input_reader {
  label_map_path: "PATH_TO_BE_CONFIGURED"
  shuffle: false
  num_epochs: 1
  tf_record_input_reader {
    input_path: "PATH_TO_BE_CONFIGURED"
  }
}
In [ ]:
# Point the pipeline at our dataset, label map, and pretrained checkpoint.
# Shared path prefixes are hoisted into constants so a relocation only needs
# one edit.
ANNOTATIONS_DIR = "/content/Tensorflow/training/annotations"
PRETRAINED_CKPT_DIR = f"/content/Tensorflow/pre-trained-models/{MODEL_NAME}/checkpoint"

pipeline_config.model.ssd.num_classes = len(labels)
pipeline_config.train_config.batch_size = 4
pipeline_config.train_config.fine_tune_checkpoint = os.path.join(PRETRAINED_CKPT_DIR, "ckpt-0")
pipeline_config.train_config.fine_tune_checkpoint_type = "detection"
pipeline_config.train_input_reader.label_map_path = f"{ANNOTATIONS_DIR}/label_map.pbtxt"
pipeline_config.train_input_reader.tf_record_input_reader.input_path[:] = [f"{ANNOTATIONS_DIR}/train.record"]
pipeline_config.eval_input_reader[0].label_map_path = f"{ANNOTATIONS_DIR}/label_map.pbtxt"
pipeline_config.eval_input_reader[0].tf_record_input_reader.input_path[:] = [f"{ANNOTATIONS_DIR}/valid.record"]
In [ ]:
# Remove the data augmentation options inherited from the zoo config
# (random_horizontal_flip and random_crop_image, per the dump above), so we
# can install our own augmentation steps from a clean slate.
pipeline_config.train_config.ClearField("data_augmentation_options")
In [ ]:
# # Randomly crop images. Allow to slightly change aspect ratios
# from object_detection.protos import preprocessor_pb2
# augmentation = preprocessor_pb2.PreprocessingStep()
# augmentation.random_crop_image.min_object_covered = 0.8
# augmentation.random_crop_image.min_aspect_ratio = 0.8
# augmentation.random_crop_image.max_aspect_ratio = 1.3
# augmentation.random_crop_image.overlap_thresh = 0.5
# augmentation.random_crop_image.clip_boxes = False
# augmentation.random_crop_image.random_coef = 0.125
# pipeline_config.train_config.data_augmentation_options.append(augmentation)

# Randomly scale images between 50% and 150% of their original size.
# NOTE: the original cell first appended an empty PreprocessingStep and removed
# it again at the end; appending the populated step directly is equivalent and
# avoids ever leaving an empty augmentation in the config.
from object_detection.protos import preprocessor_pb2
aug = preprocessor_pb2.PreprocessingStep()
aug.random_image_scale.min_scale_ratio = 0.5
aug.random_image_scale.max_scale_ratio = 1.5
pipeline_config.train_config.data_augmentation_options.append(aug)

# # Randomly add black patches
# aug = preprocessor_pb2.PreprocessingStep()
# aug.random_black_patches.max_black_patches = 50
# aug.random_black_patches.probability = 0.5
# aug.random_black_patches.size_to_image_ratio = 0.12
# pipeline_config.train_config.data_augmentation_options.append(aug)

# # Randomly add gaussian patches
# # (fixed: the original used ':' instead of '=' for the two stddev fields,
# # which Python parses as no-op annotation statements — the values were
# # silently never set)
# aug = preprocessor_pb2.PreprocessingStep()
# aug.random_patch_gaussian.random_coef = 0.5
# aug.random_patch_gaussian.min_patch_size = 10
# aug.random_patch_gaussian.max_patch_size = 100
# aug.random_patch_gaussian.min_gaussian_stddev = 0.2
# aug.random_patch_gaussian.max_gaussian_stddev = 1.5
# pipeline_config.train_config.data_augmentation_options.append(aug)
In [ ]:
pipeline_config.train_config
Out[ ]:
batch_size: 4
data_augmentation_options {
  random_image_scale {
    min_scale_ratio: 0.5
    max_scale_ratio: 1.5
  }
}
sync_replicas: true
optimizer {
  momentum_optimizer {
    learning_rate {
      cosine_decay_learning_rate {
        learning_rate_base: 0.07999999821186066
        total_steps: 50000
        warmup_learning_rate: 0.026666000485420227
        warmup_steps: 1000
      }
    }
    momentum_optimizer_value: 0.8999999761581421
  }
  use_moving_average: false
}
fine_tune_checkpoint: "/content/Tensorflow/pre-trained-models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/checkpoint/ckpt-0"
num_steps: 50000
startup_delay_steps: 0.0
replicas_to_aggregate: 8
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
fine_tune_checkpoint_type: "detection"
fine_tune_checkpoint_version: V2
In [ ]:
# Serialize the updated pipeline proto back to its config file so the
# training script below picks up our changes.
config_text = text_format.MessageToString(pipeline_config)
# NOTE(review): the file is opened in "wb" (binary) mode but `config_text`
# is a str — tf.io.gfile appears to accept this; confirm it converts to
# bytes internally before changing anything here.
with tf.io.gfile.GFile(PIPELINE_CONFIG, "wb") as f:
    f.write(config_text)

Fine-tune the model¶

In [ ]:
# Set number of training steps
# Passed to the training script below as --num_train_steps; it overrides the
# `num_steps: 50000` baked into the pipeline config (the training log shows
# "Maybe overwriting train_steps: 5000").
TRAINING_STEPS=5000
In [ ]:
# Run the training script to finetune model

TRAINING_SCRIPT = "/content/Tensorflow/models/research/object_detection/model_main_tf2.py"
MODEL_DIR = f"/content/Tensorflow/training/models/{MODEL_NAME}"
command = f"python {TRAINING_SCRIPT} --model_dir={MODEL_DIR} --pipeline_config_path={PIPELINE_CONFIG} --num_train_steps={TRAINING_STEPS}"
!{command}
2023-07-24 01:22:10.901237: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/__init__.py:98: UserWarning: unable to load libtensorflow_io_plugins.so: unable to open file: libtensorflow_io_plugins.so, from paths: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so']
caused by: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so: undefined symbol: _ZN3tsl6Status12empty_stringB5cxx11Ev']
  warnings.warn(f"unable to load libtensorflow_io_plugins.so: {e}")
/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/__init__.py:104: UserWarning: file system plugins are not loaded: unable to open file: libtensorflow_io.so, from paths: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io.so']
caused by: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io.so: undefined symbol: _ZNK10tensorflow4data11DatasetBase8FinalizeEPNS_15OpKernelContextESt8functionIFN3tsl8StatusOrISt10unique_ptrIS1_NS5_4core15RefCountDeleterEEEEvEE']
  warnings.warn(f"file system plugins are not loaded: {e}")
2023-07-24 01:22:17.070611: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:47] Overriding orig_value setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. Original config value was 0.
INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
I0724 01:22:17.071912 137702716199552 mirrored_strategy.py:419] Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
INFO:tensorflow:Maybe overwriting train_steps: 5000
I0724 01:22:17.113649 137702716199552 config_util.py:552] Maybe overwriting train_steps: 5000
INFO:tensorflow:Maybe overwriting use_bfloat16: False
I0724 01:22:17.113911 137702716199552 config_util.py:552] Maybe overwriting use_bfloat16: False
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/object_detection/model_lib_v2.py:563: StrategyBase.experimental_distribute_datasets_from_function (from tensorflow.python.distribute.distribute_lib) is deprecated and will be removed in a future version.
Instructions for updating:
rename to distribute_datasets_from_function
W0724 01:22:17.400872 137702716199552 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/object_detection/model_lib_v2.py:563: StrategyBase.experimental_distribute_datasets_from_function (from tensorflow.python.distribute.distribute_lib) is deprecated and will be removed in a future version.
Instructions for updating:
rename to distribute_datasets_from_function
INFO:tensorflow:Reading unweighted datasets: ['/content/Tensorflow/training/annotations/train.record']
I0724 01:22:17.412430 137702716199552 dataset_builder.py:162] Reading unweighted datasets: ['/content/Tensorflow/training/annotations/train.record']
INFO:tensorflow:Reading record datasets for input file: ['/content/Tensorflow/training/annotations/train.record']
I0724 01:22:17.412732 137702716199552 dataset_builder.py:79] Reading record datasets for input file: ['/content/Tensorflow/training/annotations/train.record']
INFO:tensorflow:Number of filenames to read: 1
I0724 01:22:17.412854 137702716199552 dataset_builder.py:80] Number of filenames to read: 1
WARNING:tensorflow:num_readers has been reduced to 1 to match input file shards.
W0724 01:22:17.412937 137702716199552 dataset_builder.py:86] num_readers has been reduced to 1 to match input file shards.
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/object_detection/builders/dataset_builder.py:100: parallel_interleave (from tensorflow.python.data.experimental.ops.interleave_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, num_parallel_calls=tf.data.AUTOTUNE)` instead. If sloppy execution is desired, use `tf.data.Options.deterministic`.
W0724 01:22:17.423049 137702716199552 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/object_detection/builders/dataset_builder.py:100: parallel_interleave (from tensorflow.python.data.experimental.ops.interleave_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, num_parallel_calls=tf.data.AUTOTUNE)` instead. If sloppy execution is desired, use `tf.data.Options.deterministic`.
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/object_detection/builders/dataset_builder.py:235: DatasetV1.map_with_legacy_function (from tensorflow.python.data.ops.dataset_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.map()
W0724 01:22:17.454826 137702716199552 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/object_detection/builders/dataset_builder.py:235: DatasetV1.map_with_legacy_function (from tensorflow.python.data.ops.dataset_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.map()
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: sparse_to_dense (from tensorflow.python.ops.sparse_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Create a `tf.sparse.SparseTensor` and use `tf.sparse.to_dense` instead.
W0724 01:22:27.322512 137702716199552 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: sparse_to_dense (from tensorflow.python.ops.sparse_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Create a `tf.sparse.SparseTensor` and use `tf.sparse.to_dense` instead.
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
W0724 01:22:29.674249 137702716199552 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
/usr/local/lib/python3.10/dist-packages/keras/src/backend.py:452: UserWarning: `tf.keras.backend.set_learning_phase` is deprecated and will be removed after 2020-10-11. To update it, simply pass a True/False value to the `training` argument of the `__call__` method of your layer or model.
  warnings.warn(
I0724 01:22:43.009443 137697864705600 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
I0724 01:22:53.476431 137697864705600 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
2023-07-24 01:22:57.077010: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 20460000 exceeds 10% of free system memory.
2023-07-24 01:22:57.844432: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 20460000 exceeds 10% of free system memory.
2023-07-24 01:22:57.857750: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 20460000 exceeds 10% of free system memory.
2023-07-24 01:22:57.889805: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 20460000 exceeds 10% of free system memory.
2023-07-24 01:22:57.894877: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 20460000 exceeds 10% of free system memory.
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0724 01:23:08.993748 137702716199552 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0724 01:23:08.998028 137702716199552 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0724 01:23:08.999729 137702716199552 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0724 01:23:09.001095 137702716199552 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0724 01:23:09.006876 137702716199552 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0724 01:23:09.008444 137702716199552 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0724 01:23:09.009964 137702716199552 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0724 01:23:09.011328 137702716199552 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0724 01:23:09.018151 137702716199552 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0724 01:23:09.019752 137702716199552 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/deprecation.py:648: calling map_fn_v2 (from tensorflow.python.ops.map_fn) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Use fn_output_signature instead
W0724 01:23:11.444611 137697893021248 deprecation.py:569] From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/deprecation.py:648: calling map_fn_v2 (from tensorflow.python.ops.map_fn) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Use fn_output_signature instead
I0724 01:23:13.956747 137697893021248 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
I0724 01:23:20.814094 137697893021248 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
I0724 01:23:29.217952 137697893021248 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
I0724 01:23:36.388252 137697893021248 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
INFO:tensorflow:Step 100 per-step time 0.688s
I0724 01:24:19.176022 137702716199552 model_lib_v2.py:705] Step 100 per-step time 0.688s
INFO:tensorflow:{'Loss/classification_loss': 0.40659592,
 'Loss/localization_loss': 0.2644602,
 'Loss/regularization_loss': 0.15200193,
 'Loss/total_loss': 0.82305807,
 'learning_rate': 0.0319994}
I0724 01:24:19.176432 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.40659592,
 'Loss/localization_loss': 0.2644602,
 'Loss/regularization_loss': 0.15200193,
 'Loss/total_loss': 0.82305807,
 'learning_rate': 0.0319994}
INFO:tensorflow:Step 200 per-step time 0.212s
I0724 01:24:40.330739 137702716199552 model_lib_v2.py:705] Step 200 per-step time 0.212s
INFO:tensorflow:{'Loss/classification_loss': 0.50742793,
 'Loss/localization_loss': 0.294548,
 'Loss/regularization_loss': 0.15204826,
 'Loss/total_loss': 0.9540242,
 'learning_rate': 0.0373328}
I0724 01:24:40.331070 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.50742793,
 'Loss/localization_loss': 0.294548,
 'Loss/regularization_loss': 0.15204826,
 'Loss/total_loss': 0.9540242,
 'learning_rate': 0.0373328}
INFO:tensorflow:Step 300 per-step time 0.216s
I0724 01:25:01.932317 137702716199552 model_lib_v2.py:705] Step 300 per-step time 0.216s
INFO:tensorflow:{'Loss/classification_loss': 0.22439004,
 'Loss/localization_loss': 0.13964377,
 'Loss/regularization_loss': 0.15209398,
 'Loss/total_loss': 0.5161278,
 'learning_rate': 0.0426662}
I0724 01:25:01.933461 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.22439004,
 'Loss/localization_loss': 0.13964377,
 'Loss/regularization_loss': 0.15209398,
 'Loss/total_loss': 0.5161278,
 'learning_rate': 0.0426662}
INFO:tensorflow:Step 400 per-step time 0.222s
I0724 01:25:24.176840 137702716199552 model_lib_v2.py:705] Step 400 per-step time 0.222s
INFO:tensorflow:{'Loss/classification_loss': 0.1800599,
 'Loss/localization_loss': 0.07025484,
 'Loss/regularization_loss': 0.15200068,
 'Loss/total_loss': 0.40231544,
 'learning_rate': 0.047999598}
I0724 01:25:24.177469 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.1800599,
 'Loss/localization_loss': 0.07025484,
 'Loss/regularization_loss': 0.15200068,
 'Loss/total_loss': 0.40231544,
 'learning_rate': 0.047999598}
INFO:tensorflow:Step 500 per-step time 0.220s
I0724 01:25:46.203451 137702716199552 model_lib_v2.py:705] Step 500 per-step time 0.220s
INFO:tensorflow:{'Loss/classification_loss': 0.20596375,
 'Loss/localization_loss': 0.10809008,
 'Loss/regularization_loss': 0.151918,
 'Loss/total_loss': 0.46597183,
 'learning_rate': 0.053333}
I0724 01:25:46.203864 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.20596375,
 'Loss/localization_loss': 0.10809008,
 'Loss/regularization_loss': 0.151918,
 'Loss/total_loss': 0.46597183,
 'learning_rate': 0.053333}
INFO:tensorflow:Step 600 per-step time 0.217s
I0724 01:26:07.937399 137702716199552 model_lib_v2.py:705] Step 600 per-step time 0.217s
INFO:tensorflow:{'Loss/classification_loss': 0.25268632,
 'Loss/localization_loss': 0.21420385,
 'Loss/regularization_loss': 0.15183027,
 'Loss/total_loss': 0.6187204,
 'learning_rate': 0.0586664}
I0724 01:26:07.937814 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.25268632,
 'Loss/localization_loss': 0.21420385,
 'Loss/regularization_loss': 0.15183027,
 'Loss/total_loss': 0.6187204,
 'learning_rate': 0.0586664}
INFO:tensorflow:Step 700 per-step time 0.219s
I0724 01:26:29.860457 137702716199552 model_lib_v2.py:705] Step 700 per-step time 0.219s
INFO:tensorflow:{'Loss/classification_loss': 0.1351191,
 'Loss/localization_loss': 0.06700819,
 'Loss/regularization_loss': 0.15206839,
 'Loss/total_loss': 0.35419565,
 'learning_rate': 0.0639998}
I0724 01:26:29.860821 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.1351191,
 'Loss/localization_loss': 0.06700819,
 'Loss/regularization_loss': 0.15206839,
 'Loss/total_loss': 0.35419565,
 'learning_rate': 0.0639998}
INFO:tensorflow:Step 800 per-step time 0.219s
I0724 01:26:51.778202 137702716199552 model_lib_v2.py:705] Step 800 per-step time 0.219s
INFO:tensorflow:{'Loss/classification_loss': 0.13861902,
 'Loss/localization_loss': 0.08541927,
 'Loss/regularization_loss': 0.15202636,
 'Loss/total_loss': 0.37606466,
 'learning_rate': 0.069333196}
I0724 01:26:51.778590 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.13861902,
 'Loss/localization_loss': 0.08541927,
 'Loss/regularization_loss': 0.15202636,
 'Loss/total_loss': 0.37606466,
 'learning_rate': 0.069333196}
INFO:tensorflow:Step 900 per-step time 0.217s
I0724 01:27:13.481017 137702716199552 model_lib_v2.py:705] Step 900 per-step time 0.217s
INFO:tensorflow:{'Loss/classification_loss': 0.17044531,
 'Loss/localization_loss': 0.053716693,
 'Loss/regularization_loss': 0.15202513,
 'Loss/total_loss': 0.37618715,
 'learning_rate': 0.074666604}
I0724 01:27:13.481391 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.17044531,
 'Loss/localization_loss': 0.053716693,
 'Loss/regularization_loss': 0.15202513,
 'Loss/total_loss': 0.37618715,
 'learning_rate': 0.074666604}
INFO:tensorflow:Step 1000 per-step time 0.216s
I0724 01:27:35.120146 137702716199552 model_lib_v2.py:705] Step 1000 per-step time 0.216s
INFO:tensorflow:{'Loss/classification_loss': 0.17771542,
 'Loss/localization_loss': 0.09986747,
 'Loss/regularization_loss': 0.15184921,
 'Loss/total_loss': 0.4294321,
 'learning_rate': 0.08}
I0724 01:27:35.121481 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.17771542,
 'Loss/localization_loss': 0.09986747,
 'Loss/regularization_loss': 0.15184921,
 'Loss/total_loss': 0.4294321,
 'learning_rate': 0.08}
INFO:tensorflow:Step 1100 per-step time 0.238s
I0724 01:27:58.893013 137702716199552 model_lib_v2.py:705] Step 1100 per-step time 0.238s
INFO:tensorflow:{'Loss/classification_loss': 0.15639791,
 'Loss/localization_loss': 0.08071989,
 'Loss/regularization_loss': 0.15173408,
 'Loss/total_loss': 0.38885188,
 'learning_rate': 0.07999918}
I0724 01:27:58.893557 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.15639791,
 'Loss/localization_loss': 0.08071989,
 'Loss/regularization_loss': 0.15173408,
 'Loss/total_loss': 0.38885188,
 'learning_rate': 0.07999918}
INFO:tensorflow:Step 1200 per-step time 0.218s
I0724 01:28:20.722617 137702716199552 model_lib_v2.py:705] Step 1200 per-step time 0.218s
INFO:tensorflow:{'Loss/classification_loss': 0.20038599,
 'Loss/localization_loss': 0.088057704,
 'Loss/regularization_loss': 0.15135747,
 'Loss/total_loss': 0.43980116,
 'learning_rate': 0.079996705}
I0724 01:28:20.723038 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.20038599,
 'Loss/localization_loss': 0.088057704,
 'Loss/regularization_loss': 0.15135747,
 'Loss/total_loss': 0.43980116,
 'learning_rate': 0.079996705}
INFO:tensorflow:Step 1300 per-step time 0.217s
I0724 01:28:42.472071 137702716199552 model_lib_v2.py:705] Step 1300 per-step time 0.217s
INFO:tensorflow:{'Loss/classification_loss': 0.35584718,
 'Loss/localization_loss': 0.14020765,
 'Loss/regularization_loss': 0.15114518,
 'Loss/total_loss': 0.6472,
 'learning_rate': 0.0799926}
I0724 01:28:42.472446 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.35584718,
 'Loss/localization_loss': 0.14020765,
 'Loss/regularization_loss': 0.15114518,
 'Loss/total_loss': 0.6472,
 'learning_rate': 0.0799926}
INFO:tensorflow:Step 1400 per-step time 0.218s
I0724 01:29:04.257754 137702716199552 model_lib_v2.py:705] Step 1400 per-step time 0.218s
INFO:tensorflow:{'Loss/classification_loss': 0.053961918,
 'Loss/localization_loss': 0.03408816,
 'Loss/regularization_loss': 0.15100515,
 'Loss/total_loss': 0.23905523,
 'learning_rate': 0.07998685}
I0724 01:29:04.258173 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.053961918,
 'Loss/localization_loss': 0.03408816,
 'Loss/regularization_loss': 0.15100515,
 'Loss/total_loss': 0.23905523,
 'learning_rate': 0.07998685}
INFO:tensorflow:Step 1500 per-step time 0.219s
I0724 01:29:26.169441 137702716199552 model_lib_v2.py:705] Step 1500 per-step time 0.219s
INFO:tensorflow:{'Loss/classification_loss': 0.20520799,
 'Loss/localization_loss': 0.066441186,
 'Loss/regularization_loss': 0.1506891,
 'Loss/total_loss': 0.42233828,
 'learning_rate': 0.07997945}
I0724 01:29:26.169851 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.20520799,
 'Loss/localization_loss': 0.066441186,
 'Loss/regularization_loss': 0.1506891,
 'Loss/total_loss': 0.42233828,
 'learning_rate': 0.07997945}
INFO:tensorflow:Step 1600 per-step time 0.217s
I0724 01:29:47.908032 137702716199552 model_lib_v2.py:705] Step 1600 per-step time 0.217s
INFO:tensorflow:{'Loss/classification_loss': 0.13433886,
 'Loss/localization_loss': 0.03574656,
 'Loss/regularization_loss': 0.15017995,
 'Loss/total_loss': 0.32026535,
 'learning_rate': 0.079970405}
I0724 01:29:47.908477 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.13433886,
 'Loss/localization_loss': 0.03574656,
 'Loss/regularization_loss': 0.15017995,
 'Loss/total_loss': 0.32026535,
 'learning_rate': 0.079970405}
INFO:tensorflow:Step 1700 per-step time 0.222s
I0724 01:30:10.109457 137702716199552 model_lib_v2.py:705] Step 1700 per-step time 0.222s
INFO:tensorflow:{'Loss/classification_loss': 0.16803668,
 'Loss/localization_loss': 0.038709156,
 'Loss/regularization_loss': 0.14957425,
 'Loss/total_loss': 0.35632008,
 'learning_rate': 0.07995972}
I0724 01:30:10.109895 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.16803668,
 'Loss/localization_loss': 0.038709156,
 'Loss/regularization_loss': 0.14957425,
 'Loss/total_loss': 0.35632008,
 'learning_rate': 0.07995972}
INFO:tensorflow:Step 1800 per-step time 0.218s
I0724 01:30:31.880623 137702716199552 model_lib_v2.py:705] Step 1800 per-step time 0.218s
INFO:tensorflow:{'Loss/classification_loss': 0.13863063,
 'Loss/localization_loss': 0.0511489,
 'Loss/regularization_loss': 0.14905201,
 'Loss/total_loss': 0.33883154,
 'learning_rate': 0.0799474}
I0724 01:30:31.881033 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.13863063,
 'Loss/localization_loss': 0.0511489,
 'Loss/regularization_loss': 0.14905201,
 'Loss/total_loss': 0.33883154,
 'learning_rate': 0.0799474}
INFO:tensorflow:Step 1900 per-step time 0.219s
I0724 01:30:53.780286 137702716199552 model_lib_v2.py:705] Step 1900 per-step time 0.219s
INFO:tensorflow:{'Loss/classification_loss': 0.099541344,
 'Loss/localization_loss': 0.038552277,
 'Loss/regularization_loss': 0.14847873,
 'Loss/total_loss': 0.28657234,
 'learning_rate': 0.07993342}
I0724 01:30:53.780727 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.099541344,
 'Loss/localization_loss': 0.038552277,
 'Loss/regularization_loss': 0.14847873,
 'Loss/total_loss': 0.28657234,
 'learning_rate': 0.07993342}
INFO:tensorflow:Step 2000 per-step time 0.219s
I0724 01:31:15.677552 137702716199552 model_lib_v2.py:705] Step 2000 per-step time 0.219s
INFO:tensorflow:{'Loss/classification_loss': 0.082101114,
 'Loss/localization_loss': 0.043199454,
 'Loss/regularization_loss': 0.14782746,
 'Loss/total_loss': 0.27312803,
 'learning_rate': 0.07991781}
I0724 01:31:15.677968 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.082101114,
 'Loss/localization_loss': 0.043199454,
 'Loss/regularization_loss': 0.14782746,
 'Loss/total_loss': 0.27312803,
 'learning_rate': 0.07991781}
INFO:tensorflow:Step 2100 per-step time 0.236s
I0724 01:31:39.284784 137702716199552 model_lib_v2.py:705] Step 2100 per-step time 0.236s
INFO:tensorflow:{'Loss/classification_loss': 0.066815965,
 'Loss/localization_loss': 0.05227629,
 'Loss/regularization_loss': 0.14715864,
 'Loss/total_loss': 0.2662509,
 'learning_rate': 0.07990056}
I0724 01:31:39.285259 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.066815965,
 'Loss/localization_loss': 0.05227629,
 'Loss/regularization_loss': 0.14715864,
 'Loss/total_loss': 0.2662509,
 'learning_rate': 0.07990056}
INFO:tensorflow:Step 2200 per-step time 0.219s
I0724 01:32:01.135359 137702716199552 model_lib_v2.py:705] Step 2200 per-step time 0.219s
INFO:tensorflow:{'Loss/classification_loss': 0.1769547,
 'Loss/localization_loss': 0.088731624,
 'Loss/regularization_loss': 0.14666228,
 'Loss/total_loss': 0.41234863,
 'learning_rate': 0.07988167}
I0724 01:32:01.135779 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.1769547,
 'Loss/localization_loss': 0.088731624,
 'Loss/regularization_loss': 0.14666228,
 'Loss/total_loss': 0.41234863,
 'learning_rate': 0.07988167}
INFO:tensorflow:Step 2300 per-step time 0.222s
I0724 01:32:23.375084 137702716199552 model_lib_v2.py:705] Step 2300 per-step time 0.222s
INFO:tensorflow:{'Loss/classification_loss': 0.10574483,
 'Loss/localization_loss': 0.04831732,
 'Loss/regularization_loss': 0.14597934,
 'Loss/total_loss': 0.3000415,
 'learning_rate': 0.07986114}
I0724 01:32:23.375508 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.10574483,
 'Loss/localization_loss': 0.04831732,
 'Loss/regularization_loss': 0.14597934,
 'Loss/total_loss': 0.3000415,
 'learning_rate': 0.07986114}
INFO:tensorflow:Step 2400 per-step time 0.219s
I0724 01:32:45.247933 137702716199552 model_lib_v2.py:705] Step 2400 per-step time 0.219s
INFO:tensorflow:{'Loss/classification_loss': 0.07339632,
 'Loss/localization_loss': 0.033077918,
 'Loss/regularization_loss': 0.14521724,
 'Loss/total_loss': 0.25169146,
 'learning_rate': 0.07983897}
I0724 01:32:45.248265 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.07339632,
 'Loss/localization_loss': 0.033077918,
 'Loss/regularization_loss': 0.14521724,
 'Loss/total_loss': 0.25169146,
 'learning_rate': 0.07983897}
INFO:tensorflow:Step 2500 per-step time 0.218s
I0724 01:33:07.072127 137702716199552 model_lib_v2.py:705] Step 2500 per-step time 0.218s
INFO:tensorflow:{'Loss/classification_loss': 0.12791088,
 'Loss/localization_loss': 0.06671654,
 'Loss/regularization_loss': 0.14459127,
 'Loss/total_loss': 0.33921868,
 'learning_rate': 0.079815164}
I0724 01:33:07.072461 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.12791088,
 'Loss/localization_loss': 0.06671654,
 'Loss/regularization_loss': 0.14459127,
 'Loss/total_loss': 0.33921868,
 'learning_rate': 0.079815164}
INFO:tensorflow:Step 2600 per-step time 0.218s
I0724 01:33:28.839857 137702716199552 model_lib_v2.py:705] Step 2600 per-step time 0.218s
INFO:tensorflow:{'Loss/classification_loss': 0.08674487,
 'Loss/localization_loss': 0.039332535,
 'Loss/regularization_loss': 0.14389555,
 'Loss/total_loss': 0.26997295,
 'learning_rate': 0.07978972}
I0724 01:33:28.840193 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.08674487,
 'Loss/localization_loss': 0.039332535,
 'Loss/regularization_loss': 0.14389555,
 'Loss/total_loss': 0.26997295,
 'learning_rate': 0.07978972}
INFO:tensorflow:Step 2700 per-step time 0.217s
I0724 01:33:50.546591 137702716199552 model_lib_v2.py:705] Step 2700 per-step time 0.217s
INFO:tensorflow:{'Loss/classification_loss': 0.036983088,
 'Loss/localization_loss': 0.010271816,
 'Loss/regularization_loss': 0.14321044,
 'Loss/total_loss': 0.19046535,
 'learning_rate': 0.07976264}
I0724 01:33:50.546916 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.036983088,
 'Loss/localization_loss': 0.010271816,
 'Loss/regularization_loss': 0.14321044,
 'Loss/total_loss': 0.19046535,
 'learning_rate': 0.07976264}
INFO:tensorflow:Step 2800 per-step time 0.216s
I0724 01:34:12.158288 137702716199552 model_lib_v2.py:705] Step 2800 per-step time 0.216s
INFO:tensorflow:{'Loss/classification_loss': 0.07081515,
 'Loss/localization_loss': 0.026692975,
 'Loss/regularization_loss': 0.14246945,
 'Loss/total_loss': 0.23997758,
 'learning_rate': 0.07973392}
I0724 01:34:12.158652 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.07081515,
 'Loss/localization_loss': 0.026692975,
 'Loss/regularization_loss': 0.14246945,
 'Loss/total_loss': 0.23997758,
 'learning_rate': 0.07973392}
INFO:tensorflow:Step 2900 per-step time 0.219s
I0724 01:34:34.092929 137702716199552 model_lib_v2.py:705] Step 2900 per-step time 0.219s
INFO:tensorflow:{'Loss/classification_loss': 0.04810733,
 'Loss/localization_loss': 0.022639051,
 'Loss/regularization_loss': 0.1418724,
 'Loss/total_loss': 0.21261878,
 'learning_rate': 0.07970358}
I0724 01:34:34.093356 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.04810733,
 'Loss/localization_loss': 0.022639051,
 'Loss/regularization_loss': 0.1418724,
 'Loss/total_loss': 0.21261878,
 'learning_rate': 0.07970358}
INFO:tensorflow:Step 3000 per-step time 0.219s
I0724 01:34:56.033289 137702716199552 model_lib_v2.py:705] Step 3000 per-step time 0.219s
INFO:tensorflow:{'Loss/classification_loss': 0.04330796,
 'Loss/localization_loss': 0.022319626,
 'Loss/regularization_loss': 0.14115694,
 'Loss/total_loss': 0.20678453,
 'learning_rate': 0.0796716}
I0724 01:34:56.033647 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.04330796,
 'Loss/localization_loss': 0.022319626,
 'Loss/regularization_loss': 0.14115694,
 'Loss/total_loss': 0.20678453,
 'learning_rate': 0.0796716}
INFO:tensorflow:Step 3100 per-step time 0.230s
I0724 01:35:19.056108 137702716199552 model_lib_v2.py:705] Step 3100 per-step time 0.230s
INFO:tensorflow:{'Loss/classification_loss': 0.038636446,
 'Loss/localization_loss': 0.026625399,
 'Loss/regularization_loss': 0.14052132,
 'Loss/total_loss': 0.20578316,
 'learning_rate': 0.07963799}
I0724 01:35:19.056489 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.038636446,
 'Loss/localization_loss': 0.026625399,
 'Loss/regularization_loss': 0.14052132,
 'Loss/total_loss': 0.20578316,
 'learning_rate': 0.07963799}
INFO:tensorflow:Step 3200 per-step time 0.217s
I0724 01:35:40.731414 137702716199552 model_lib_v2.py:705] Step 3200 per-step time 0.217s
INFO:tensorflow:{'Loss/classification_loss': 0.040685352,
 'Loss/localization_loss': 0.023924895,
 'Loss/regularization_loss': 0.13980864,
 'Loss/total_loss': 0.20441888,
 'learning_rate': 0.07960275}
I0724 01:35:40.731773 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.040685352,
 'Loss/localization_loss': 0.023924895,
 'Loss/regularization_loss': 0.13980864,
 'Loss/total_loss': 0.20441888,
 'learning_rate': 0.07960275}
INFO:tensorflow:Step 3300 per-step time 0.216s
I0724 01:36:02.352184 137702716199552 model_lib_v2.py:705] Step 3300 per-step time 0.216s
INFO:tensorflow:{'Loss/classification_loss': 0.061802696,
 'Loss/localization_loss': 0.01994451,
 'Loss/regularization_loss': 0.13906227,
 'Loss/total_loss': 0.22080947,
 'learning_rate': 0.07956588}
I0724 01:36:02.352544 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.061802696,
 'Loss/localization_loss': 0.01994451,
 'Loss/regularization_loss': 0.13906227,
 'Loss/total_loss': 0.22080947,
 'learning_rate': 0.07956588}
INFO:tensorflow:Step 3400 per-step time 0.216s
I0724 01:36:23.949459 137702716199552 model_lib_v2.py:705] Step 3400 per-step time 0.216s
INFO:tensorflow:{'Loss/classification_loss': 0.052114043,
 'Loss/localization_loss': 0.018743087,
 'Loss/regularization_loss': 0.1384247,
 'Loss/total_loss': 0.20928183,
 'learning_rate': 0.079527386}
I0724 01:36:23.949788 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.052114043,
 'Loss/localization_loss': 0.018743087,
 'Loss/regularization_loss': 0.1384247,
 'Loss/total_loss': 0.20928183,
 'learning_rate': 0.079527386}
INFO:tensorflow:Step 3500 per-step time 0.216s
I0724 01:36:45.591435 137702716199552 model_lib_v2.py:705] Step 3500 per-step time 0.216s
INFO:tensorflow:{'Loss/classification_loss': 0.02446295,
 'Loss/localization_loss': 0.02287177,
 'Loss/regularization_loss': 0.1377127,
 'Loss/total_loss': 0.18504742,
 'learning_rate': 0.07948727}
I0724 01:36:45.591768 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.02446295,
 'Loss/localization_loss': 0.02287177,
 'Loss/regularization_loss': 0.1377127,
 'Loss/total_loss': 0.18504742,
 'learning_rate': 0.07948727}
INFO:tensorflow:Step 3600 per-step time 0.221s
I0724 01:37:07.660271 137702716199552 model_lib_v2.py:705] Step 3600 per-step time 0.221s
INFO:tensorflow:{'Loss/classification_loss': 0.05925524,
 'Loss/localization_loss': 0.014394746,
 'Loss/regularization_loss': 0.13700259,
 'Loss/total_loss': 0.21065257,
 'learning_rate': 0.079445526}
I0724 01:37:07.660688 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.05925524,
 'Loss/localization_loss': 0.014394746,
 'Loss/regularization_loss': 0.13700259,
 'Loss/total_loss': 0.21065257,
 'learning_rate': 0.079445526}
INFO:tensorflow:Step 3700 per-step time 0.218s
I0724 01:37:29.501527 137702716199552 model_lib_v2.py:705] Step 3700 per-step time 0.218s
INFO:tensorflow:{'Loss/classification_loss': 0.039896466,
 'Loss/localization_loss': 0.02692235,
 'Loss/regularization_loss': 0.13638528,
 'Loss/total_loss': 0.2032041,
 'learning_rate': 0.07940216}
I0724 01:37:29.501946 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.039896466,
 'Loss/localization_loss': 0.02692235,
 'Loss/regularization_loss': 0.13638528,
 'Loss/total_loss': 0.2032041,
 'learning_rate': 0.07940216}
INFO:tensorflow:Step 3800 per-step time 0.220s
I0724 01:37:51.481154 137702716199552 model_lib_v2.py:705] Step 3800 per-step time 0.220s
INFO:tensorflow:{'Loss/classification_loss': 0.038175188,
 'Loss/localization_loss': 0.020892508,
 'Loss/regularization_loss': 0.13585128,
 'Loss/total_loss': 0.19491898,
 'learning_rate': 0.079357184}
I0724 01:37:51.481600 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.038175188,
 'Loss/localization_loss': 0.020892508,
 'Loss/regularization_loss': 0.13585128,
 'Loss/total_loss': 0.19491898,
 'learning_rate': 0.079357184}
INFO:tensorflow:Step 3900 per-step time 0.219s
I0724 01:38:13.411234 137702716199552 model_lib_v2.py:705] Step 3900 per-step time 0.219s
INFO:tensorflow:{'Loss/classification_loss': 0.052402988,
 'Loss/localization_loss': 0.022097362,
 'Loss/regularization_loss': 0.13513027,
 'Loss/total_loss': 0.20963062,
 'learning_rate': 0.07931058}
I0724 01:38:13.411828 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.052402988,
 'Loss/localization_loss': 0.022097362,
 'Loss/regularization_loss': 0.13513027,
 'Loss/total_loss': 0.20963062,
 'learning_rate': 0.07931058}
INFO:tensorflow:Step 4000 per-step time 0.220s
I0724 01:38:35.367775 137702716199552 model_lib_v2.py:705] Step 4000 per-step time 0.220s
INFO:tensorflow:{'Loss/classification_loss': 0.032382403,
 'Loss/localization_loss': 0.031490225,
 'Loss/regularization_loss': 0.13441578,
 'Loss/total_loss': 0.19828841,
 'learning_rate': 0.07926236}
I0724 01:38:35.368221 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.032382403,
 'Loss/localization_loss': 0.031490225,
 'Loss/regularization_loss': 0.13441578,
 'Loss/total_loss': 0.19828841,
 'learning_rate': 0.07926236}
INFO:tensorflow:Step 4100 per-step time 0.237s
I0724 01:38:59.037456 137702716199552 model_lib_v2.py:705] Step 4100 per-step time 0.237s
INFO:tensorflow:{'Loss/classification_loss': 0.045272242,
 'Loss/localization_loss': 0.014247153,
 'Loss/regularization_loss': 0.13371897,
 'Loss/total_loss': 0.19323836,
 'learning_rate': 0.07921253}
I0724 01:38:59.037888 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.045272242,
 'Loss/localization_loss': 0.014247153,
 'Loss/regularization_loss': 0.13371897,
 'Loss/total_loss': 0.19323836,
 'learning_rate': 0.07921253}
INFO:tensorflow:Step 4200 per-step time 0.222s
I0724 01:39:21.198901 137702716199552 model_lib_v2.py:705] Step 4200 per-step time 0.222s
INFO:tensorflow:{'Loss/classification_loss': 0.028220486,
 'Loss/localization_loss': 0.020325644,
 'Loss/regularization_loss': 0.13302359,
 'Loss/total_loss': 0.18156973,
 'learning_rate': 0.07916109}
I0724 01:39:21.199270 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.028220486,
 'Loss/localization_loss': 0.020325644,
 'Loss/regularization_loss': 0.13302359,
 'Loss/total_loss': 0.18156973,
 'learning_rate': 0.07916109}
INFO:tensorflow:Step 4300 per-step time 0.217s
I0724 01:39:42.929188 137702716199552 model_lib_v2.py:705] Step 4300 per-step time 0.217s
INFO:tensorflow:{'Loss/classification_loss': 0.011768773,
 'Loss/localization_loss': 0.0076962975,
 'Loss/regularization_loss': 0.13229266,
 'Loss/total_loss': 0.15175773,
 'learning_rate': 0.07910804}
I0724 01:39:42.929640 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.011768773,
 'Loss/localization_loss': 0.0076962975,
 'Loss/regularization_loss': 0.13229266,
 'Loss/total_loss': 0.15175773,
 'learning_rate': 0.07910804}
INFO:tensorflow:Step 4400 per-step time 0.217s
I0724 01:40:04.617190 137702716199552 model_lib_v2.py:705] Step 4400 per-step time 0.217s
INFO:tensorflow:{'Loss/classification_loss': 0.011322817,
 'Loss/localization_loss': 0.008279241,
 'Loss/regularization_loss': 0.13158473,
 'Loss/total_loss': 0.1511868,
 'learning_rate': 0.07905338}
I0724 01:40:04.617601 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.011322817,
 'Loss/localization_loss': 0.008279241,
 'Loss/regularization_loss': 0.13158473,
 'Loss/total_loss': 0.1511868,
 'learning_rate': 0.07905338}
INFO:tensorflow:Step 4500 per-step time 0.217s
I0724 01:40:26.355925 137702716199552 model_lib_v2.py:705] Step 4500 per-step time 0.217s
INFO:tensorflow:{'Loss/classification_loss': 0.042676993,
 'Loss/localization_loss': 0.018480267,
 'Loss/regularization_loss': 0.1309092,
 'Loss/total_loss': 0.19206646,
 'learning_rate': 0.07899711}
I0724 01:40:26.356299 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.042676993,
 'Loss/localization_loss': 0.018480267,
 'Loss/regularization_loss': 0.1309092,
 'Loss/total_loss': 0.19206646,
 'learning_rate': 0.07899711}
INFO:tensorflow:Step 4600 per-step time 0.216s
I0724 01:40:47.914191 137702716199552 model_lib_v2.py:705] Step 4600 per-step time 0.216s
INFO:tensorflow:{'Loss/classification_loss': 0.02154037,
 'Loss/localization_loss': 0.020086322,
 'Loss/regularization_loss': 0.13022941,
 'Loss/total_loss': 0.1718561,
 'learning_rate': 0.078939244}
I0724 01:40:47.914592 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.02154037,
 'Loss/localization_loss': 0.020086322,
 'Loss/regularization_loss': 0.13022941,
 'Loss/total_loss': 0.1718561,
 'learning_rate': 0.078939244}
INFO:tensorflow:Step 4700 per-step time 0.216s
I0724 01:41:09.530660 137702716199552 model_lib_v2.py:705] Step 4700 per-step time 0.216s
INFO:tensorflow:{'Loss/classification_loss': 0.033489935,
 'Loss/localization_loss': 0.01978143,
 'Loss/regularization_loss': 0.12953117,
 'Loss/total_loss': 0.18280254,
 'learning_rate': 0.07887978}
I0724 01:41:09.531036 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.033489935,
 'Loss/localization_loss': 0.01978143,
 'Loss/regularization_loss': 0.12953117,
 'Loss/total_loss': 0.18280254,
 'learning_rate': 0.07887978}
INFO:tensorflow:Step 4800 per-step time 0.216s
I0724 01:41:31.173742 137702716199552 model_lib_v2.py:705] Step 4800 per-step time 0.216s
INFO:tensorflow:{'Loss/classification_loss': 0.019300217,
 'Loss/localization_loss': 0.010951835,
 'Loss/regularization_loss': 0.1288211,
 'Loss/total_loss': 0.15907316,
 'learning_rate': 0.07881871}
I0724 01:41:31.174106 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.019300217,
 'Loss/localization_loss': 0.010951835,
 'Loss/regularization_loss': 0.1288211,
 'Loss/total_loss': 0.15907316,
 'learning_rate': 0.07881871}
INFO:tensorflow:Step 4900 per-step time 0.218s
I0724 01:41:52.987815 137702716199552 model_lib_v2.py:705] Step 4900 per-step time 0.218s
INFO:tensorflow:{'Loss/classification_loss': 0.045773715,
 'Loss/localization_loss': 0.014094334,
 'Loss/regularization_loss': 0.12813042,
 'Loss/total_loss': 0.18799847,
 'learning_rate': 0.07875605}
I0724 01:41:52.988214 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.045773715,
 'Loss/localization_loss': 0.014094334,
 'Loss/regularization_loss': 0.12813042,
 'Loss/total_loss': 0.18799847,
 'learning_rate': 0.07875605}
INFO:tensorflow:Step 5000 per-step time 0.219s
I0724 01:42:14.921013 137702716199552 model_lib_v2.py:705] Step 5000 per-step time 0.219s
INFO:tensorflow:{'Loss/classification_loss': 0.046769343,
 'Loss/localization_loss': 0.02401721,
 'Loss/regularization_loss': 0.127439,
 'Loss/total_loss': 0.19822556,
 'learning_rate': 0.078691795}
I0724 01:42:14.921431 137702716199552 model_lib_v2.py:708] {'Loss/classification_loss': 0.046769343,
 'Loss/localization_loss': 0.02401721,
 'Loss/regularization_loss': 0.127439,
 'Loss/total_loss': 0.19822556,
 'learning_rate': 0.078691795}

Evaluate¶

In [ ]:
# Install this package for compatibility

!pip install pillow==9.5
Collecting pillow==9.5
  Downloading Pillow-9.5.0-cp310-cp310-manylinux_2_28_x86_64.whl (3.4 MB)
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 3.4/3.4 MB 25.5 MB/s eta 0:00:00
Installing collected packages: pillow
  Attempting uninstall: pillow
    Found existing installation: Pillow 8.4.0
    Uninstalling Pillow-8.4.0:
      Successfully uninstalled Pillow-8.4.0
Successfully installed pillow-9.5.0

Mean Average Precision Formula:
$$ mAP = \frac{1}{N}\sum_{i=1}^{N} AP_i $$ where $AP_i$ is the Average Precision for class $i$ and $N$ is the number of classes.

In [ ]:
# Evaluate the tuned model on validation set

command = f"python {TRAINING_SCRIPT} --model_dir={MODEL_DIR} --pipeline_config_path={PIPELINE_CONFIG} --checkpoint_dir={MODEL_DIR}"
!{command}
2023-07-24 01:43:24.994952: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/__init__.py:98: UserWarning: unable to load libtensorflow_io_plugins.so: unable to open file: libtensorflow_io_plugins.so, from paths: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so']
caused by: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so: undefined symbol: _ZN3tsl6Status12empty_stringB5cxx11Ev']
  warnings.warn(f"unable to load libtensorflow_io_plugins.so: {e}")
/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/__init__.py:104: UserWarning: file system plugins are not loaded: unable to open file: libtensorflow_io.so, from paths: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io.so']
caused by: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io.so: undefined symbol: _ZNK10tensorflow4data11DatasetBase8FinalizeEPNS_15OpKernelContextESt8functionIFN3tsl8StatusOrISt10unique_ptrIS1_NS5_4core15RefCountDeleterEEEEvEE']
  warnings.warn(f"file system plugins are not loaded: {e}")
WARNING:tensorflow:Forced number of epochs for all eval validations to be 1.
W0724 01:43:27.845376 136527414276736 model_lib_v2.py:1089] Forced number of epochs for all eval validations to be 1.
INFO:tensorflow:Maybe overwriting sample_1_of_n_eval_examples: None
I0724 01:43:27.845603 136527414276736 config_util.py:552] Maybe overwriting sample_1_of_n_eval_examples: None
INFO:tensorflow:Maybe overwriting use_bfloat16: False
I0724 01:43:27.845687 136527414276736 config_util.py:552] Maybe overwriting use_bfloat16: False
INFO:tensorflow:Maybe overwriting eval_num_epochs: 1
I0724 01:43:27.845772 136527414276736 config_util.py:552] Maybe overwriting eval_num_epochs: 1
WARNING:tensorflow:Expected number of evaluation epochs is 1, but instead encountered `eval_on_train_input_config.num_epochs` = 0. Overwriting `num_epochs` to 1.
W0724 01:43:27.845886 136527414276736 model_lib_v2.py:1106] Expected number of evaluation epochs is 1, but instead encountered `eval_on_train_input_config.num_epochs` = 0. Overwriting `num_epochs` to 1.
2023-07-24 01:43:28.818162: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:47] Overriding orig_value setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. Original config value was 0.
INFO:tensorflow:Reading unweighted datasets: ['/content/Tensorflow/training/annotations/valid.record']
I0724 01:43:29.029720 136527414276736 dataset_builder.py:162] Reading unweighted datasets: ['/content/Tensorflow/training/annotations/valid.record']
INFO:tensorflow:Reading record datasets for input file: ['/content/Tensorflow/training/annotations/valid.record']
I0724 01:43:29.029996 136527414276736 dataset_builder.py:79] Reading record datasets for input file: ['/content/Tensorflow/training/annotations/valid.record']
INFO:tensorflow:Number of filenames to read: 1
I0724 01:43:29.030104 136527414276736 dataset_builder.py:80] Number of filenames to read: 1
WARNING:tensorflow:num_readers has been reduced to 1 to match input file shards.
W0724 01:43:29.030181 136527414276736 dataset_builder.py:86] num_readers has been reduced to 1 to match input file shards.
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/object_detection/builders/dataset_builder.py:100: parallel_interleave (from tensorflow.python.data.experimental.ops.interleave_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, num_parallel_calls=tf.data.AUTOTUNE)` instead. If sloppy execution is desired, use `tf.data.Options.deterministic`.
W0724 01:43:29.034907 136527414276736 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/object_detection/builders/dataset_builder.py:100: parallel_interleave (from tensorflow.python.data.experimental.ops.interleave_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, num_parallel_calls=tf.data.AUTOTUNE)` instead. If sloppy execution is desired, use `tf.data.Options.deterministic`.
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/object_detection/builders/dataset_builder.py:235: DatasetV1.map_with_legacy_function (from tensorflow.python.data.ops.dataset_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.map()
W0724 01:43:29.056430 136527414276736 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/object_detection/builders/dataset_builder.py:235: DatasetV1.map_with_legacy_function (from tensorflow.python.data.ops.dataset_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.map()
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: sparse_to_dense (from tensorflow.python.ops.sparse_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Create a `tf.sparse.SparseTensor` and use `tf.sparse.to_dense` instead.
W0724 01:43:38.771552 136527414276736 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: sparse_to_dense (from tensorflow.python.ops.sparse_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Create a `tf.sparse.SparseTensor` and use `tf.sparse.to_dense` instead.
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
W0724 01:43:43.818725 136527414276736 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
INFO:tensorflow:Waiting for new checkpoint at /content/Tensorflow/training/models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8
I0724 01:43:49.453681 136527414276736 checkpoint_utils.py:168] Waiting for new checkpoint at /content/Tensorflow/training/models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8
INFO:tensorflow:Found new checkpoint at /content/Tensorflow/training/models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/ckpt-6
I0724 01:43:49.454857 136527414276736 checkpoint_utils.py:177] Found new checkpoint at /content/Tensorflow/training/models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/ckpt-6
/usr/local/lib/python3.10/dist-packages/keras/src/backend.py:452: UserWarning: `tf.keras.backend.set_learning_phase` is deprecated and will be removed after 2020-10-11. To update it, simply pass a True/False value to the `training` argument of the `__call__` method of your layer or model.
  warnings.warn(
I0724 01:43:56.174112 136527414276736 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
I0724 01:44:16.357409 136527414276736 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: to_int64 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
W0724 01:44:25.609712 136527414276736 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: to_int64 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
INFO:tensorflow:Finished eval step 0
I0724 01:44:25.654124 136527414276736 model_lib_v2.py:966] Finished eval step 0
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/tensorflow/python/autograph/impl/api.py:460: py_func (from tensorflow.python.ops.script_ops) is deprecated and will be removed in a future version.
Instructions for updating:
tf.py_func is deprecated in TF V2. Instead, there are two
    options available in V2.
    - tf.py_function takes a python function which manipulates tf eager
    tensors instead of numpy arrays. It's easy to convert a tf eager tensor to
    an ndarray (just call tensor.numpy()) but having access to eager tensors
    means `tf.py_function`s can use accelerators such as GPUs as well as
    being differentiable using a gradient tape.
    - tf.numpy_function maintains the semantics of the deprecated tf.py_func
    (it is not differentiable, and manipulates numpy arrays). It drops the
    stateful argument making all functions stateful.
    
W0724 01:44:25.899996 136527414276736 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/tensorflow/python/autograph/impl/api.py:460: py_func (from tensorflow.python.ops.script_ops) is deprecated and will be removed in a future version.
Instructions for updating:
tf.py_func is deprecated in TF V2. Instead, there are two
    options available in V2.
    - tf.py_function takes a python function which manipulates tf eager
    tensors instead of numpy arrays. It's easy to convert a tf eager tensor to
    an ndarray (just call tensor.numpy()) but having access to eager tensors
    means `tf.py_function`s can use accelerators such as GPUs as well as
    being differentiable using a gradient tape.
    - tf.numpy_function maintains the semantics of the deprecated tf.py_func
    (it is not differentiable, and manipulates numpy arrays). It drops the
    stateful argument making all functions stateful.
    
INFO:tensorflow:Finished eval step 100
I0724 01:44:33.764549 136527414276736 model_lib_v2.py:966] Finished eval step 100
INFO:tensorflow:Performing evaluation on 176 images.
I0724 01:45:11.917804 136527414276736 coco_evaluation.py:293] Performing evaluation on 176 images.
creating index...
index created!
INFO:tensorflow:Loading and preparing annotation results...
I0724 01:45:11.920135 136527414276736 coco_tools.py:116] Loading and preparing annotation results...
INFO:tensorflow:DONE (t=0.01s)
I0724 01:45:11.932274 136527414276736 coco_tools.py:138] DONE (t=0.01s)
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=0.43s).
Accumulating evaluation results...
DONE (t=0.25s).
 Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.660
 Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ] = 0.868
 Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ] = 0.757
 Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.516
 Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.715
 Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.761
 Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = 0.643
 Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = 0.709
 Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.716
 Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.542
 Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.766
 Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.801
INFO:tensorflow:Eval metrics at step 5000
I0724 01:45:12.650180 136527414276736 model_lib_v2.py:1015] Eval metrics at step 5000
INFO:tensorflow:	+ DetectionBoxes_Precision/mAP: 0.659759
I0724 01:45:12.661122 136527414276736 model_lib_v2.py:1018] 	+ DetectionBoxes_Precision/mAP: 0.659759
INFO:tensorflow:	+ DetectionBoxes_Precision/mAP@.50IOU: 0.868079
I0724 01:45:12.662908 136527414276736 model_lib_v2.py:1018] 	+ DetectionBoxes_Precision/mAP@.50IOU: 0.868079
INFO:tensorflow:	+ DetectionBoxes_Precision/mAP@.75IOU: 0.756506
I0724 01:45:12.664538 136527414276736 model_lib_v2.py:1018] 	+ DetectionBoxes_Precision/mAP@.75IOU: 0.756506
INFO:tensorflow:	+ DetectionBoxes_Precision/mAP (small): 0.516324
I0724 01:45:12.666178 136527414276736 model_lib_v2.py:1018] 	+ DetectionBoxes_Precision/mAP (small): 0.516324
INFO:tensorflow:	+ DetectionBoxes_Precision/mAP (medium): 0.715297
I0724 01:45:12.667772 136527414276736 model_lib_v2.py:1018] 	+ DetectionBoxes_Precision/mAP (medium): 0.715297
INFO:tensorflow:	+ DetectionBoxes_Precision/mAP (large): 0.761463
I0724 01:45:12.669305 136527414276736 model_lib_v2.py:1018] 	+ DetectionBoxes_Precision/mAP (large): 0.761463
INFO:tensorflow:	+ DetectionBoxes_Recall/AR@1: 0.642960
I0724 01:45:12.670761 136527414276736 model_lib_v2.py:1018] 	+ DetectionBoxes_Recall/AR@1: 0.642960
INFO:tensorflow:	+ DetectionBoxes_Recall/AR@10: 0.709131
I0724 01:45:12.672214 136527414276736 model_lib_v2.py:1018] 	+ DetectionBoxes_Recall/AR@10: 0.709131
INFO:tensorflow:	+ DetectionBoxes_Recall/AR@100: 0.716104
I0724 01:45:12.673728 136527414276736 model_lib_v2.py:1018] 	+ DetectionBoxes_Recall/AR@100: 0.716104
INFO:tensorflow:	+ DetectionBoxes_Recall/AR@100 (small): 0.541813
I0724 01:45:12.675182 136527414276736 model_lib_v2.py:1018] 	+ DetectionBoxes_Recall/AR@100 (small): 0.541813
INFO:tensorflow:	+ DetectionBoxes_Recall/AR@100 (medium): 0.765528
I0724 01:45:12.676723 136527414276736 model_lib_v2.py:1018] 	+ DetectionBoxes_Recall/AR@100 (medium): 0.765528
INFO:tensorflow:	+ DetectionBoxes_Recall/AR@100 (large): 0.801486
I0724 01:45:12.678280 136527414276736 model_lib_v2.py:1018] 	+ DetectionBoxes_Recall/AR@100 (large): 0.801486
INFO:tensorflow:	+ Loss/localization_loss: 0.046551
I0724 01:45:12.679392 136527414276736 model_lib_v2.py:1018] 	+ Loss/localization_loss: 0.046551
INFO:tensorflow:	+ Loss/classification_loss: 0.161588
I0724 01:45:12.680496 136527414276736 model_lib_v2.py:1018] 	+ Loss/classification_loss: 0.161588
INFO:tensorflow:	+ Loss/regularization_loss: 0.127433
I0724 01:45:12.681620 136527414276736 model_lib_v2.py:1018] 	+ Loss/regularization_loss: 0.127433
INFO:tensorflow:	+ Loss/total_loss: 0.335572
I0724 01:45:12.682745 136527414276736 model_lib_v2.py:1018] 	+ Loss/total_loss: 0.335572
INFO:tensorflow:Waiting for new checkpoint at /content/Tensorflow/training/models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8
I0724 01:48:49.554483 136527414276736 checkpoint_utils.py:168] Waiting for new checkpoint at /content/Tensorflow/training/models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/absl/app.py", line 308, in run
    _run_main(main, args)
  File "/usr/local/lib/python3.10/dist-packages/absl/app.py", line 254, in _run_main
    sys.exit(main(argv))
  File "/content/Tensorflow/models/research/object_detection/model_main_tf2.py", line 81, in main
    model_lib_v2.eval_continuously(
  File "/usr/local/lib/python3.10/dist-packages/object_detection/model_lib_v2.py", line 1135, in eval_continuously
    for latest_checkpoint in tf.train.checkpoints_iterator(
  File "/usr/local/lib/python3.10/dist-packages/tensorflow/python/training/checkpoint_utils.py", line 226, in checkpoints_iterator
    new_checkpoint_path = wait_for_new_checkpoint(
  File "/usr/local/lib/python3.10/dist-packages/tensorflow/python/training/checkpoint_utils.py", line 175, in wait_for_new_checkpoint
    time.sleep(seconds_to_sleep)
KeyboardInterrupt

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/content/Tensorflow/models/research/object_detection/model_main_tf2.py", line 114, in <module>
    tf.compat.v1.app.run()
  File "/usr/local/lib/python3.10/dist-packages/tensorflow/python/platform/app.py", line 36, in run
    _run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
  File "/usr/local/lib/python3.10/dist-packages/absl/app.py", line 308, in run
    _run_main(main, args)
KeyboardInterrupt
^C

Export model¶

In [ ]:
# Copy the exporter script from Tensorflow/models/research/ into
# Tensorflow/training/ so it can be run from the training directory.
exporter_script = "models/research/object_detection/exporter_main_v2.py"
shutil.copy(exporter_script, "training")
Out[ ]:
'training/exporter_main_v2.py'
In [ ]:
# Run the exporter script to save the model

%cd training
command = f"python exporter_main_v2.py --input_type image_tensor --pipeline_config_path {PIPELINE_CONFIG} --trained_checkpoint_dir {MODEL_DIR} --output_directory exported-models/{MODEL_NAME}"
!{command}
/content/Tensorflow/training
2023-07-24 01:49:34.397025: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/__init__.py:98: UserWarning: unable to load libtensorflow_io_plugins.so: unable to open file: libtensorflow_io_plugins.so, from paths: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so']
caused by: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so: undefined symbol: _ZN3tsl6Status12empty_stringB5cxx11Ev']
  warnings.warn(f"unable to load libtensorflow_io_plugins.so: {e}")
/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/__init__.py:104: UserWarning: file system plugins are not loaded: unable to open file: libtensorflow_io.so, from paths: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io.so']
caused by: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io.so: undefined symbol: _ZNK10tensorflow4data11DatasetBase8FinalizeEPNS_15OpKernelContextESt8functionIFN3tsl8StatusOrISt10unique_ptrIS1_NS5_4core15RefCountDeleterEEEEvEE']
  warnings.warn(f"file system plugins are not loaded: {e}")
2023-07-24 01:49:37.768151: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:47] Overriding orig_value setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. Original config value was 0.
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/tensorflow/python/autograph/impl/api.py:459: calling map_fn_v2 (from tensorflow.python.ops.map_fn) with back_prop=False is deprecated and will be removed in a future version.
Instructions for updating:
back_prop=False is deprecated. Consider using tf.stop_gradient instead.
Instead of:
results = tf.map_fn(fn, elems, back_prop=False)
Use:
results = tf.nest.map_structure(tf.stop_gradient, tf.map_fn(fn, elems))
W0724 01:49:38.079579 140485910614656 deprecation.py:641] From /usr/local/lib/python3.10/dist-packages/tensorflow/python/autograph/impl/api.py:459: calling map_fn_v2 (from tensorflow.python.ops.map_fn) with back_prop=False is deprecated and will be removed in a future version.
Instructions for updating:
back_prop=False is deprecated. Consider using tf.stop_gradient instead.
Instead of:
results = tf.map_fn(fn, elems, back_prop=False)
Use:
results = tf.nest.map_structure(tf.stop_gradient, tf.map_fn(fn, elems))
I0724 01:49:42.031505 140485910614656 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
I0724 01:49:55.999751 140485910614656 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
I0724 01:49:59.267247 140485910614656 signature_serialization.py:148] Function `call_func` contains input name(s) resource with unsupported characters which will be renamed to weightsharedconvolutionalboxpredictor_predictiontower_conv2d_3_batchnorm_feature_4_fusedbatchnormv3_readvariableop_1_resource in the SavedModel.
I0724 01:50:00.510791 140485910614656 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7fc4fe6b2fb0>, because it is not built.
W0724 01:50:03.098042 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7fc4fe6b2fb0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.convolutional.separable_conv2d.SeparableConv2D object at 0x7fc4b39e7e80>, because it is not built.
W0724 01:50:03.384250 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.convolutional.separable_conv2d.SeparableConv2D object at 0x7fc4b39e7e80>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc478596140>, because it is not built.
W0724 01:50:03.384488 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc478596140>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc478596d40>, because it is not built.
W0724 01:50:03.384604 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc478596d40>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.convolutional.separable_conv2d.SeparableConv2D object at 0x7fc478594910>, because it is not built.
W0724 01:50:03.384692 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.convolutional.separable_conv2d.SeparableConv2D object at 0x7fc478594910>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc478594eb0>, because it is not built.
W0724 01:50:03.384780 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc478594eb0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc478595150>, because it is not built.
W0724 01:50:03.384859 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc478595150>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.convolutional.separable_conv2d.SeparableConv2D object at 0x7fc478596260>, because it is not built.
W0724 01:50:03.384934 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.convolutional.separable_conv2d.SeparableConv2D object at 0x7fc478596260>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc478595e40>, because it is not built.
W0724 01:50:03.385008 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc478595e40>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc478596890>, because it is not built.
W0724 01:50:03.385081 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc478596890>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.convolutional.separable_conv2d.SeparableConv2D object at 0x7fc478594310>, because it is not built.
W0724 01:50:03.385152 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.convolutional.separable_conv2d.SeparableConv2D object at 0x7fc478594310>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc4785959c0>, because it is not built.
W0724 01:50:03.385223 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc4785959c0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc478597790>, because it is not built.
W0724 01:50:03.385294 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc478597790>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc478596ce0>, because it is not built.
W0724 01:50:03.385379 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc478596ce0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc478574ac0>, because it is not built.
W0724 01:50:03.385453 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc478574ac0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc478577640>, because it is not built.
W0724 01:50:03.385531 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc478577640>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc4785759f0>, because it is not built.
W0724 01:50:03.385603 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc4785759f0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc478577a00>, because it is not built.
W0724 01:50:03.385674 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc478577a00>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc478576950>, because it is not built.
W0724 01:50:03.385746 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc478576950>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc478577550>, because it is not built.
W0724 01:50:03.385817 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc478577550>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc478577520>, because it is not built.
W0724 01:50:03.385899 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc478577520>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc478576260>, because it is not built.
W0724 01:50:03.385970 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc478576260>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc4dc12efb0>, because it is not built.
W0724 01:50:03.386055 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc4dc12efb0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc4dc12ed40>, because it is not built.
W0724 01:50:03.386146 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc4dc12ed40>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc4dc0ec2e0>, because it is not built.
W0724 01:50:03.386219 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc4dc0ec2e0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc489bd2e90>, because it is not built.
W0724 01:50:03.386291 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc489bd2e90>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc489bd1e10>, because it is not built.
W0724 01:50:03.386375 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc489bd1e10>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc489bd05b0>, because it is not built.
W0724 01:50:03.386449 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc489bd05b0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc489bd2470>, because it is not built.
W0724 01:50:03.386526 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc489bd2470>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc489bd2590>, because it is not built.
W0724 01:50:03.386597 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc489bd2590>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc489bd12d0>, because it is not built.
W0724 01:50:03.386669 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc489bd12d0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc489bd3220>, because it is not built.
W0724 01:50:03.386742 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc489bd3220>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc49bf08f10>, because it is not built.
W0724 01:50:03.386813 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc49bf08f10>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc49bf09900>, because it is not built.
W0724 01:50:03.386884 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc49bf09900>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc49bf08ca0>, because it is not built.
W0724 01:50:03.386956 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc49bf08ca0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc49bf08b50>, because it is not built.
W0724 01:50:03.387027 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc49bf08b50>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc49bf08bb0>, because it is not built.
W0724 01:50:03.387102 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc49bf08bb0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc4dc7acb80>, because it is not built.
W0724 01:50:03.387174 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc4dc7acb80>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc4785f6ce0>, because it is not built.
W0724 01:50:03.387244 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc4785f6ce0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc4785f6fb0>, because it is not built.
W0724 01:50:03.387315 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc4785f6fb0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc4785f5270>, because it is not built.
W0724 01:50:03.387399 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc4785f5270>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc4785f71f0>, because it is not built.
W0724 01:50:03.387484 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc4785f71f0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc4785f7820>, because it is not built.
W0724 01:50:03.387559 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc4785f7820>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc4785f58a0>, because it is not built.
W0724 01:50:03.387631 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7fc4785f58a0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc4785f5810>, because it is not built.
W0724 01:50:03.450621 140485910614656 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7fc4785f5810>, because it is not built.
I0724 01:50:20.916862 140485910614656 save.py:274] Found untraced functions such as WeightSharedConvolutionalBoxPredictor_layer_call_fn, WeightSharedConvolutionalBoxPredictor_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxHead_layer_call_fn, WeightSharedConvolutionalBoxHead_layer_call_and_return_conditional_losses, WeightSharedConvolutionalClassHead_layer_call_fn while saving (showing 5 of 173). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: exported-models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/assets
I0724 01:50:27.235861 140485910614656 builder_impl.py:804] Assets written to: exported-models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/assets
I0724 01:50:27.540453 140485910614656 fingerprinting_utils.py:48] Writing fingerprint to exported-models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/fingerprint.pb
INFO:tensorflow:Writing pipeline config file to exported-models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/pipeline.config
I0724 01:50:28.091818 140485910614656 config_util.py:253] Writing pipeline config file to exported-models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/pipeline.config
In [ ]:
# Zip the folder containing the finetuned model
# (so it can be uploaded to Google Drive and re-downloaded for inference later).

# cd into exported-models first so the paths stored inside the zip are
# relative to the model folder, not to /content.
%cd exported-models
# MODEL_NAME is defined in an earlier cell; the archive is named after it.
command = f"zip -r {MODEL_NAME}.zip {MODEL_NAME}"
!{command}
%cd ..
/content/Tensorflow/training/exported-models
  adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/ (stored 0%)
  adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/ (stored 0%)
  adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/saved_model.pb (deflated 92%)
  adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/fingerprint.pb (stored 0%)
  adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/assets/ (stored 0%)
  adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/variables/ (stored 0%)
  adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/variables/variables.index (deflated 78%)
  adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/variables/variables.data-00000-of-00001 (deflated 9%)
  adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/checkpoint/ (stored 0%)
  adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/checkpoint/ckpt-0.index (deflated 80%)
  adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/checkpoint/checkpoint (deflated 41%)
  adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/checkpoint/ckpt-0.data-00000-of-00001 (deflated 8%)
  adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/pipeline.config (deflated 69%)
/content/Tensorflow/training

Inference¶

Note: Restart the runtime before running this inference section to avoid errors left over from the earlier package installation and training steps.
After restarting the runtime, the current working directory is /content/.

In [ ]:
import time
import os
import cv2
import tensorflow as tf
import numpy as np
import gdown
from zipfile import ZipFile
from google.colab.patches import cv2_imshow
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils

Note: Set MODEL_NAME below to the name of the finetuned model you want to use for inference.

In [ ]:
MODEL_NAME = "ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8"
In [ ]:
# Paths used for inference, relative to the notebook working directory.
TEST_MODEL_PATH = f"Tensorflow/testing/{MODEL_NAME}/saved_model"  # exported SavedModel loaded below
CONFIG_PATH = f"Tensorflow/training/models/{MODEL_NAME}/pipeline.config"  # training pipeline config (not used later in this section)
CKPT_PATH = f"Tensorflow/training/models/{MODEL_NAME}"  # training checkpoint directory (not used later in this section)
In [ ]:
# Create a new directory for evaluation: Tensorflow/testing
# os.makedirs with exist_ok=True creates the parent "Tensorflow" directory
# too if it is missing, and is a no-op when the directory already exists,
# so no manual os.path.exists() check is needed (os.mkdir would raise
# FileNotFoundError if the parent were absent).
os.makedirs("Tensorflow/testing", exist_ok=True)
In [ ]:
# Download the finetuned model. Follow these steps:
#
# The finetuned model is stored as a zip file in Google Drive with a share link available.
# For example, if the share link is: https://drive.google.com/file/d/1FfSKlAGV_Z-GfvVx03Wkpxuh8r876HXq/view?usp=drive_link
# the id of the file is: 1FfSKlAGV_Z-GfvVx03Wkpxuh8r876HXq (between /d/ and /view).
# Copy and append the id to https://drive.google.com/uc?export=download&id=
# The final downloadable address is: https://drive.google.com/uc?export=download&id=1FfSKlAGV_Z-GfvVx03Wkpxuh8r876HXq

gdown.download("https://drive.google.com/uc?export=download&id=1Gik-5Fy_oh19mqF2Gaw9cmlQ9wnx2C-W")

# Extract the SavedModel/checkpoint/pipeline.config into Tensorflow/testing/.
with ZipFile(f"{MODEL_NAME}.zip") as zipfile:
    zipfile.extractall("Tensorflow/testing/")

# Remove the archive with os.remove instead of shelling out to `rm`:
# pure Python, portable, and it raises a clear FileNotFoundError if the
# download failed instead of silently printing a shell error.
os.remove(f"{MODEL_NAME}.zip")
Downloading...
From: https://drive.google.com/uc?export=download&id=1Gik-5Fy_oh19mqF2Gaw9cmlQ9wnx2C-W
To: /content/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.zip
100%|██████████| 19.9M/19.9M [00:00<00:00, 26.0MB/s]
In [ ]:
# Load the finetuned model (exported SavedModel downloaded above).
test_model = tf.saved_model.load(TEST_MODEL_PATH)

# Create directory Tensorflow/testing/test_images to store test images.
# os.makedirs with exist_ok=True creates missing parents and tolerates
# already-existing directories, replacing the two manual
# exists()/mkdir() pairs in one call.
os.makedirs("Tensorflow/testing/test_images", exist_ok=True)

# Every sample image below is downloaded to this same path and then plotted.
TEST_IMAGE_PATH = "Tensorflow/testing/test_images/test_image.png"

# Load the label map file (class id -> display name) used when drawing boxes.
category_index = label_map_util.create_category_index_from_labelmap(
    "Tensorflow/training/annotations/label_map.pbtxt", use_display_name=True)


# Function to plot image with detections

def plot_detections(image_path=TEST_IMAGE_PATH):
    """Run the finetuned detector on one image and display the result.

    Args:
        image_path: Path of the image file to run detection on.
            Defaults to TEST_IMAGE_PATH.

    Raises:
        FileNotFoundError: If the image cannot be read from image_path.
    """
    # Read the test image. Bug fix: the original read the global
    # TEST_IMAGE_PATH here, silently ignoring the image_path argument.
    image_np = cv2.imread(image_path)
    if image_np is None:
        # cv2.imread returns None (no exception) on a missing/unreadable file.
        raise FileNotFoundError(f"Could not read image: {image_path}")
    # NOTE(review): cv2.imread returns BGR channel order; the visualization
    # below draws directly on that array — confirm colors are acceptable.
    input_tensor = tf.convert_to_tensor(image_np)
    # The model expects a batch of images, so add an axis with `tf.newaxis`.
    input_tensor = input_tensor[tf.newaxis, ...]

    # Time a single forward pass through the SavedModel.
    start_time = time.time()
    detections = test_model(input_tensor)
    elapsed_time = time.time() - start_time
    print("Model runtime:", round(elapsed_time, 3), "seconds")

    # All outputs are batched tensors.
    # Convert to numpy arrays, and take index [0] to remove the batch dimension.
    # We're only interested in the first num_detections.
    num_detections = int(detections.pop('num_detections'))
    detections = {key: value[0, :num_detections].numpy()
                    for key, value in detections.items()}
    detections['num_detections'] = num_detections

    # detection_classes should be ints (label-map lookup keys).
    detections['detection_classes'] = detections['detection_classes'].astype(np.int64)

    # Draw on a copy so the original image array stays untouched.
    image_np_with_detections = image_np.copy()
    image_np_with_detections_uint8 = image_np_with_detections.astype(np.uint8)

    viz_utils.visualize_boxes_and_labels_on_image_array(
            image_np_with_detections_uint8,
            detections['detection_boxes'],
            detections['detection_classes'],
            detections['detection_scores'],
            category_index,
            use_normalized_coordinates=True,
            max_boxes_to_draw=200,
            min_score_thresh=.30,  # hide low-confidence detections
            agnostic_mode=False)

    cv2_imshow(image_np_with_detections_uint8)
In [ ]:
# These are links to some sample images for testing:

# Download a sample image to TEST_IMAGE_PATH (overwriting any previous one)
# and run the detector on it.
gdown.download("https://drive.google.com/uc?export=download&id=1GG8l6W9U13G7Pq7NodSty6jcCmNALVmf", TEST_IMAGE_PATH)
plot_detections(TEST_IMAGE_PATH)
Downloading...
From: https://drive.google.com/uc?export=download&id=1GG8l6W9U13G7Pq7NodSty6jcCmNALVmf
To: /content/Tensorflow/testing/test_images/test_image.png
100%|██████████| 62.6k/62.6k [00:00<00:00, 48.5MB/s]
Model runtime: 0.048 seconds

In [ ]:
# Second sample image: download to TEST_IMAGE_PATH and run the detector.
gdown.download("https://drive.google.com/uc?export=download&id=1Fd_qrsfrUtuJFpbBnd9C60hBPdbvVgzT", TEST_IMAGE_PATH)
plot_detections(TEST_IMAGE_PATH)
Downloading...
From: https://drive.google.com/uc?export=download&id=1Fd_qrsfrUtuJFpbBnd9C60hBPdbvVgzT
To: /content/Tensorflow/testing/test_images/test_image.png
100%|██████████| 184k/184k [00:00<00:00, 73.1MB/s]
Model runtime: 0.048 seconds
In [ ]:
# Third sample image: download to TEST_IMAGE_PATH and run the detector.
gdown.download("https://drive.google.com/uc?export=download&id=1GI74d5XWs3zcui6Eyv-HBZ91qlZT4qVf", TEST_IMAGE_PATH)
plot_detections(TEST_IMAGE_PATH)
Downloading...
From: https://drive.google.com/uc?export=download&id=1GI74d5XWs3zcui6Eyv-HBZ91qlZT4qVf
To: /content/Tensorflow/testing/test_images/test_image.png
100%|██████████| 240k/240k [00:00<00:00, 72.4MB/s]
Model runtime: 0.048 seconds
In [ ]:
# Fourth sample image: download to TEST_IMAGE_PATH and run the detector.
gdown.download("https://drive.google.com/uc?export=download&id=1GNAdKNAGOGnQfwdSNWlpOnhqnZJOQB9q", TEST_IMAGE_PATH)
plot_detections(TEST_IMAGE_PATH)
Downloading...
From: https://drive.google.com/uc?export=download&id=1GNAdKNAGOGnQfwdSNWlpOnhqnZJOQB9q
To: /content/Tensorflow/testing/test_images/test_image.png
100%|██████████| 14.1k/14.1k [00:00<00:00, 39.2MB/s]
Model runtime: 0.041 seconds

In [ ]: